crypto: meson-txlx: enable amlogic crypto dma
authorMatthew Shyu <matthew.shyu@amlogic.com>
Tue, 7 Nov 2017 09:10:22 +0000 (17:10 +0800)
committerBo Yang <bo.yang@amlogic.com>
Wed, 27 Dec 2017 08:38:34 +0000 (01:38 -0700)
PD#154260: enable crypto driver

restructure crypto driver on top of dma and enable it on txlx

Change-Id: I0ffc0aaf9ad94c9652b9141bf92201c92463a893
Signed-off-by: Matthew Shyu <matthew.shyu@amlogic.com>
12 files changed:
Documentation/devicetree/bindings/crypto/aml-crypto.txt
MAINTAINERS
arch/arm64/boot/dts/amlogic/mesonaxg.dtsi
arch/arm64/boot/dts/amlogic/mesongxl.dtsi
arch/arm64/boot/dts/amlogic/mesongxm.dtsi
drivers/amlogic/crypto/Makefile
drivers/amlogic/crypto/aml-aes-dma.c
drivers/amlogic/crypto/aml-crypto-dma.c
drivers/amlogic/crypto/aml-crypto-dma.h
drivers/amlogic/crypto/aml-dma.c [new file with mode: 0644]
drivers/amlogic/crypto/aml-sha-dma.c
drivers/amlogic/crypto/aml-tdes-dma.c

index aecf7cf..edadf24 100644 (file)
@@ -60,22 +60,29 @@ aml_sha{
 };
 
 * New DMA for GXL and beyond
+* DMA engine for crypto operations
+Required properties:
+- compatible : Should be "amlogic,aml_gxl_dma" or "amlogic,aml_txlx_dma".
+- reg:  Should contain the base address of regs
+- interrupts: Should contain the IRQ line for DMA.
+
+Example:
+aml_dma {
+       compatible = "amlogic,aml_txlx_dma";
+       reg = <0x0 0xff63e000 0x0 0x48>;
+       interrupts = <0 180 1>;
+};
+
 * Advanced Encryption Standard (AES)
 
 Required properties:
 - compatible : Should be "amlogic,aes".
 - dev_name : Should be "aml_aes"
-- interrupts: Should contain the IRQ line for the AES.
-- resets: Should contain the clock to enable the module
-- reg:  Should contain the base address of regs
 
 Example:
 aml_aes{
        compatible = "amlogic,aes_dma";
        dev_name = "aml_aes_dma";
-       interrupts = <0 188 1
-               0 189 1>;
-       reg = <0x0 0xc883e000 0x0 0x28>;
 };
 
 
@@ -84,33 +91,21 @@ aml_aes{
 Required properties:
 - compatible : Should be "amlogic,des,tdes".
 - dev_name : Should be "aml_aes"
-- interrupts: Should contain the IRQ line for the TDES.
-- resets: Should contain the clock to enable the module
-- reg:  Should contain the base address of regs
 
 Example:
 aml_tdes{
        compatible = "amlogic,des_dma,tdes_dma";
        dev_name = "aml_tdes_dma";
-       interrupts = <0 188 1
-               0 189 1>;
-       reg = <0x0 0xc883e000 0x0 0x28>;
 };
 * Secure Hash Algorithm (SHA1/SHA224/SHA256/HMAC)
 
 Required properties:
 - compatible : Should be "amlogic,sha".
 - dev_name : Should be "aml_sha"
-- interrupts: Should contain the IRQ line for the SHA.
-- resets: Should contain the clock to enable the module
-- reg:  Should contain the base address of regs
 
 Example:
 aml_sha{
        compatible = "amlogic,sha_dma";
        dev_name = "aml_sha_dma";
-       interrupts = <0 188 1
-               0 189 1>;
-       reg = <0x0 0xc883e000 0x0 0x28>;
 };
 
index 3874828..5ec4ed6 100644 (file)
@@ -13446,7 +13446,16 @@ F:     scripts/amlogic/configs/
 
 AMLOGIC CRYPTO DMA BLKMV
 M:     Matthew Shyu <matthew.shyu@amlogic.com>
-F:     drivers/amlogic/crypto/*
+F:     drivers/amlogic/crypto/aml-aes-blkmv.c
+F:     drivers/amlogic/crypto/aml-aes-dma.c
+F:     drivers/amlogic/crypto/aml-crypto-blkmv.c
+F:     drivers/amlogic/crypto/aml-crypto-blkmv.h
+F:     drivers/amlogic/crypto/aml-crypto-dma.c
+F:     drivers/amlogic/crypto/aml-crypto-dma.h
+F:     drivers/amlogic/crypto/aml-dma.c
+F:     drivers/amlogic/crypto/aml-sha-dma.c
+F:     drivers/amlogic/crypto/aml-tdes-blkmv.c
+F:     drivers/amlogic/crypto/aml-tdes-dma.c
 
 AMLOGIC saradc
 M:     Xingyu Chen <xingyu.chen@amlogic.com>
index 9d3b953..a70599b 100644 (file)
                pinctrl-0=<&i2c_slave_pin>;
        };
 
-       aml_aes {
-               compatible = "amlogic,aes_dma";
-               dev_name = "aml_aes_dma";
-               status = "okay";
-               interrupts = <0 180 1 0 181 1>;
+       aml_dma {
+               compatible = "amlogic,aml_txlx_dma";
                reg = <0x0 0xff63e000 0x0 0x48>;
-       };
+               interrupts = <0 180 1>;
 
-       aml_sha {
-               compatible = "amlogic,sha_dma";
-               dev_name = "aml_sha_dma";
-               status = "okay";
-               interrupts = <0 180 1 0 181 1>;
-               reg = <0x0 0xff63e000 0x0 0x48>;
+               aml_aes {
+                       compatible = "amlogic,aes_dma";
+                       dev_name = "aml_aes_dma";
+                       status = "okay";
+               };
+
+               aml_sha {
+                       compatible = "amlogic,sha_dma";
+                       dev_name = "aml_sha_dma";
+                       status = "okay";
+               };
        };
 
        saradc:saradc {
index 79a7453..1b01b9d 100644 (file)
 };
 
 /{
-       aml_aes {
-               compatible = "amlogic,aes_dma";
-               dev_name = "aml_aes_dma";
-               status = "okay";
-               interrupts = <0 188 1 0 189 1>;
+       aml_dma {
+               compatible = "amlogic,aml_gxl_dma";
                reg = <0x0 0xc883e000 0x0 0x28>;
-       };
+               interrupts = <0 180 1>;
 
-       aml_tdes {
-               compatible = "amlogic,des_dma,tdes_dma";
-               dev_name = "aml_tdes_dma";
-               status = "okay";
-               interrupts = <0 188 1 0 189 1>;
-               reg = <0x0 0xc883e000 0x0 0x28>;
-       };
+               aml_aes {
+                       compatible = "amlogic,aes_dma";
+                       dev_name = "aml_aes_dma";
+                       status = "okay";
+               };
 
-       aml_sha {
-               compatible = "amlogic,sha_dma";
-               dev_name = "aml_sha_dma";
-               status = "okay";
-               interrupts = <0 188 1 0 189 1>;
-               reg = <0x0 0xc883e000 0x0 0x28>;
+               aml_tdes {
+                       compatible = "amlogic,des_dma,tdes_dma";
+                       dev_name = "aml_tdes_dma";
+                       status = "okay";
+               };
        };
 
        audio_data:audio_data {
index b33ce50..5ca347a 100644 (file)
 };
 
 /{
-       aml_aes {
-               compatible = "amlogic,aes_dma";
-               dev_name = "aml_aes_dma";
-               status = "okay";
-               interrupts = <0 188 1 0 189 1>;
+       aml_dma {
+               compatible = "amlogic,aml_gxl_dma";
                reg = <0x0 0xc883e000 0x0 0x28>;
-       };
+               interrupts = <0 180 1>;
 
-       aml_tdes {
-               compatible = "amlogic,des_dma,tdes_dma";
-               dev_name = "aml_tdes_dma";
-               status = "okay";
-               interrupts = <0 188 1 0 189 1>;
-               reg = <0x0 0xc883e000 0x0 0x28>;
-       };
+               aml_aes {
+                       compatible = "amlogic,aes_dma";
+                       dev_name = "aml_aes_dma";
+                       status = "okay";
+               };
 
-       aml_sha {
-               compatible = "amlogic,sha_dma";
-               dev_name = "aml_sha_dma";
-               status = "okay";
-               interrupts = <0 188 1 0 189 1>;
-               reg = <0x0 0xc883e000 0x0 0x28>;
+               aml_tdes {
+                       compatible = "amlogic,des_dma,tdes_dma";
+                       dev_name = "aml_tdes_dma";
+                       status = "okay";
+               };
        };
 
        audio_data:audio_data {
index 0271f40..e16c924 100644 (file)
@@ -1,7 +1,8 @@
+obj-$(CONFIG_AMLOGIC_CRYPTO_DMA)                               += aml-cryp-dma.o
+aml-cryp-dma-objs:= aml-dma.o aml-crypto-dma.o
 obj-$(CONFIG_AMLOGIC_CRYPTO_DMA)                               += aml-aes-dma.o
 obj-$(CONFIG_AMLOGIC_CRYPTO_DMA)                               += aml-tdes-dma.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_DMA)                               += aml-crypto-dma.o
 obj-$(CONFIG_AMLOGIC_CRYPTO_DMA)                               += aml-sha-dma.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV)                             += aml-aes-blkmv.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV)                             += aml-tdes-blkmv.o
-obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV)                             += aml-crypto-blkmv.o
+obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV)                             += aml-aes-blkmv.o
+obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV)                             += aml-tdes-blkmv.o
+obj-$(CONFIG_AMLOGIC_CRYPTO_BLKMV)                             += aml-crypto-blkmv.o
index e0a3406..ee47c9c 100644 (file)
@@ -40,7 +40,6 @@
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 #include <linux/amlogic/iomap.h>
-#include <linux/amlogic/cpu_version.h>
 #include "aml-crypto-dma.h"
 
 /* AES flags */
 #define AML_AES_QUEUE_LENGTH   50
 #define AML_AES_DMA_THRESHOLD          16
 
-#define DMA_THREAD_REG (get_dma_t0_offset() + AES_THREAD_INDEX)
-#define DMA_STATUS_REG (get_dma_sts0_offset() + AES_THREAD_INDEX)
-
-u8 map_in_aes_dma;
 struct aml_aes_dev;
 
 struct aml_aes_ctx {
@@ -86,7 +81,9 @@ struct aml_aes_dev {
        unsigned long           flags;
        int     err;
 
-       spinlock_t              lock;
+       struct aml_dma_dev      *dma;
+       uint32_t thread;
+       uint32_t status;
        struct crypto_queue     queue;
 
        struct tasklet_struct   done_task;
@@ -125,14 +122,15 @@ static struct aml_aes_drv aml_aes = {
        .lock = __SPIN_LOCK_UNLOCKED(aml_aes.lock),
 };
 
-static void set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
+static int set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
                uint32_t keylen, u32 *iv, uint8_t swap)
 {
        struct dma_dsc *dsc = dd->descriptor;
        uint32_t key_iv[12];
        uint32_t *piv = key_iv + 8;
        int32_t len = keylen;
-       dma_addr_t dma_addr_key;
+       dma_addr_t dma_addr_key = 0;
+       uint32_t i = 0;
 
        memset(key_iv, 0, sizeof(key_iv));
        memcpy(key_iv, key, keylen);
@@ -148,43 +146,42 @@ static void set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
                len = 48; /* full key storage */
        }
 
+       if (!len)
+               return -EPERM;
+
        dma_addr_key = dma_map_single(dd->dev, key_iv,
                        sizeof(key_iv), DMA_TO_DEVICE);
 
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
-               uint32_t i = 0;
-               while (len > 0) {
-                       dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
-                       dsc[i].tgt_addr = i * 16;
-                       dsc[i].dsc_cfg.d32 = 0;
-                       dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
-                       dsc[i].dsc_cfg.b.mode = MODE_KEY;
-                       dsc[i].dsc_cfg.b.eoc = 0;
-                       dsc[i].dsc_cfg.b.owner = 1;
-                       i++;
-                       len -= 16;
-               }
-               dsc[i - 1].dsc_cfg.b.eoc = 1;
-       } else {
-               dsc->src_addr = (uint32_t)dma_addr_key;
-               dsc->tgt_addr = 0;
-               dsc->dsc_cfg.d32 = 0;
-               dsc->dsc_cfg.b.length = len;
-               dsc->dsc_cfg.b.mode = MODE_KEY;
-               dsc->dsc_cfg.b.eoc = 1;
-               dsc->dsc_cfg.b.owner = 1;
+       if (dma_mapping_error(dd->dev, dma_addr_key)) {
+               dev_err(dd->dev, "error mapping dma_addr_key\n");
+               return -EINVAL;
        }
 
+       while (len > 0) {
+               dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
+               dsc[i].tgt_addr = i * 16;
+               dsc[i].dsc_cfg.d32 = 0;
+               dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
+               dsc[i].dsc_cfg.b.mode = MODE_KEY;
+               dsc[i].dsc_cfg.b.eoc = 0;
+               dsc[i].dsc_cfg.b.owner = 1;
+               i++;
+               len -= 16;
+       }
+       dsc[i - 1].dsc_cfg.b.eoc = 1;
+
        dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
                        PAGE_SIZE, DMA_TO_DEVICE);
-       aml_write_crypto_reg(DMA_THREAD_REG,
+       aml_write_crypto_reg(dd->thread,
                        (uintptr_t) dd->dma_descript_tab | 2);
-       aml_dma_debug(dsc, 1, __func__);
-       while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+       aml_dma_debug(dsc, i, __func__, dd->thread, dd->status);
+       while (aml_read_crypto_reg(dd->status) == 0)
                ;
-       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+       aml_write_crypto_reg(dd->status, 0xf);
        dma_unmap_single(dd->dev, dma_addr_key,
                        sizeof(key_iv), DMA_TO_DEVICE);
+
+       return 0;
 }
 
 static size_t aml_aes_sg_copy(struct scatterlist **sg, size_t *offset,
@@ -302,6 +299,7 @@ static void aml_aes_finish_req(struct aml_aes_dev *dd, int32_t err)
        struct ablkcipher_request *req = dd->req;
 
        dd->flags &= ~AES_FLAGS_BUSY;
+       dd->dma->dma_busy = 0;
        req->base.complete(&req->base, err);
 }
 
@@ -333,8 +331,8 @@ static int aml_aes_crypt_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
        dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
                        PAGE_SIZE, DMA_TO_DEVICE);
 
-       aml_dma_debug(dsc, nents, __func__);
-       aml_write_crypto_reg(DMA_THREAD_REG, dd->dma_descript_tab | 2);
+       aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
+       aml_write_crypto_reg(dd->thread, dd->dma_descript_tab | 2);
        return 0;
 }
 
@@ -405,13 +403,14 @@ static int aml_aes_write_ctrl(struct aml_aes_dev *dd)
                return err;
 
        if (dd->flags & AES_FLAGS_CBC)
-               set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+               err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
                                dd->req->info, 0);
        else if  (dd->flags & AES_FLAGS_CTR)
-               set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+               err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
                                dd->req->info, 1);
        else
-               set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen, NULL, 0);
+               err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+                               NULL, 0);
 
        return err;
 }
@@ -425,19 +424,21 @@ static int aml_aes_handle_queue(struct aml_aes_dev *dd,
        unsigned long flags;
        int32_t err, ret = 0;
 
-       spin_lock_irqsave(&dd->lock, flags);
+       spin_lock_irqsave(&dd->dma->dma_lock, flags);
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
 
-       if (dd->flags & AES_FLAGS_BUSY) {
-               spin_unlock_irqrestore(&dd->lock, flags);
+       if (dd->flags & AES_FLAGS_BUSY || dd->dma->dma_busy) {
+               spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
-       if (async_req)
+       if (async_req) {
                dd->flags |= AES_FLAGS_BUSY;
-       spin_unlock_irqrestore(&dd->lock, flags);
+               dd->dma->dma_busy = 1;
+       }
+       spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
 
        if (!async_req)
                return ret;
@@ -700,7 +701,7 @@ static struct crypto_alg aes_algs[] = {
        {
                .cra_name         = "cbc(aes)",
                .cra_driver_name  = "cbc-aes-aml",
-               .cra_priority   = 300,
+               .cra_priority   = 100,
                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_ctxsize    = sizeof(struct aml_aes_ctx),
@@ -721,7 +722,7 @@ static struct crypto_alg aes_algs[] = {
        {
                .cra_name        = "ctr(aes)",
                .cra_driver_name = "ctr-aes-aml",
-               .cra_priority    = 300,
+               .cra_priority    = 100,
                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize  = AES_BLOCK_SIZE,
                .cra_ctxsize    = sizeof(struct aml_aes_ctx),
@@ -756,7 +757,7 @@ static void aml_aes_done_task(unsigned long data)
        err = aml_aes_crypt_dma_stop(dd);
 
        aml_dma_debug(dd->descriptor, dd->fast_nents ?
-                       dd->fast_nents : 1, __func__);
+                       dd->fast_nents : 1, __func__, dd->thread, dd->status);
 
        err = dd->err ? : err;
 
@@ -785,13 +786,13 @@ static void aml_aes_done_task(unsigned long data)
 static irqreturn_t aml_aes_irq(int irq, void *dev_id)
 {
        struct aml_aes_dev *aes_dd = dev_id;
-       uint8_t status = aml_read_crypto_reg(DMA_STATUS_REG);
+       uint8_t status = aml_read_crypto_reg(aes_dd->status);
 
        if (status) {
                if (status == 0x1)
                        pr_err("irq overwrite\n");
                if (AES_FLAGS_DMA & aes_dd->flags) {
-                       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+                       aml_write_crypto_reg(aes_dd->status, 0xf);
                        tasklet_schedule(&aes_dd->done_task);
                        return IRQ_HANDLED;
                } else {
@@ -833,8 +834,6 @@ static int aml_aes_probe(struct platform_device *pdev)
 {
        struct aml_aes_dev *aes_dd;
        struct device *dev = &pdev->dev;
-       struct resource *res_irq = 0;
-       struct resource *res_base = 0;
        int err = -EPERM;
 
        aes_dd = kzalloc(sizeof(struct aml_aes_dev), GFP_KERNEL);
@@ -844,19 +843,11 @@ static int aml_aes_probe(struct platform_device *pdev)
        }
 
        aes_dd->dev = dev;
+       aes_dd->dma = dev_get_drvdata(dev->parent);
+       aes_dd->thread = aes_dd->dma->thread;
+       aes_dd->status = aes_dd->dma->status;
+       aes_dd->irq = aes_dd->dma->irq;
        platform_set_drvdata(pdev, aes_dd);
-       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, AES_THREAD_INDEX);
-       res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res_base) {
-               dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
-               goto aes_dd_err;
-       } else {
-               if (!cryptoreg_offset) {
-                       cryptoreg_offset = ioremap(res_base->start,
-                                       resource_size(res_base));
-                       map_in_aes_dma = 1;
-               }
-       }
 
        INIT_LIST_HEAD(&aes_dd->list);
 
@@ -866,9 +857,6 @@ static int aml_aes_probe(struct platform_device *pdev)
                        (unsigned long)aes_dd);
 
        crypto_init_queue(&aes_dd->queue, AML_AES_QUEUE_LENGTH);
-
-       aes_dd->irq = res_irq->start;
-
        err = request_irq(aes_dd->irq, aml_aes_irq, IRQF_SHARED, "aml-aes",
                        aes_dd);
        if (err) {
@@ -905,11 +893,6 @@ err_aes_buff:
        free_irq(aes_dd->irq, aes_dd);
 aes_irq_err:
 
-       if (map_in_aes_dma) {
-               iounmap(cryptoreg_offset);
-               map_in_aes_dma = 0;
-       }
-
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
        kfree(aes_dd);
@@ -936,12 +919,6 @@ static int aml_aes_remove(struct platform_device *pdev)
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
 
-       if (map_in_aes_dma) {
-               iounmap(cryptoreg_offset);
-               map_in_aes_dma = 0;
-       }
-
-
        if (aes_dd->irq > 0)
                free_irq(aes_dd->irq, aes_dd);
 
index 1914074..5417c2a 100644 (file)
@@ -40,8 +40,7 @@
 #include <linux/amlogic/iomap.h>
 #include <linux/amlogic/cpu_version.h>
 #include "aml-crypto-dma.h"
-#if 1
-void __iomem *cryptoreg_offset;
+
 u32 swap_ulong32(u32 val)
 {
        u32 res = 0;
@@ -50,77 +49,67 @@ u32 swap_ulong32(u32 val)
                (((val >> 16) & 0xff) << 8) + ((val >> 24) & 0xff);
        return res;
 }
-void aml_write_crypto_reg(u32 addr, u32 data)
-{
-       writel(data, cryptoreg_offset + (addr << 2));
-}
-
-u32 aml_read_crypto_reg(u32 addr)
-{
-       return readl(cryptoreg_offset + (addr << 2));
-}
-#endif
+EXPORT_SYMBOL_GPL(swap_ulong32);
 
-u32 get_dma_t0_offset(void)
+void aml_write_crypto_reg(u32 addr, u32 data)
 {
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
-               return TXLX_DMA_T0;
+       if (cryptoreg)
+               writel(data, cryptoreg + (addr << 2));
        else
-               return GXL_DMA_T0;
+               pr_err("crypto reg mapping is not initialized\n");
 }
+EXPORT_SYMBOL_GPL(aml_write_crypto_reg);
 
-u32 get_dma_sts0_offset(void)
+u32 aml_read_crypto_reg(u32 addr)
 {
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
-               return TXLX_DMA_STS0;
-       else
-               return GXL_DMA_STS0;
+       if (!cryptoreg) {
+               pr_err("crypto reg mapping is not initialized\n");
+               return 0;
+       }
+       return readl(cryptoreg + (addr << 2));
 }
+EXPORT_SYMBOL_GPL(aml_read_crypto_reg);
 
-void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg)
+void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg,
+               u32 thread, u32 status)
 {
-#if AML_CRYPTO_DEBUG
        u32 i = 0;
-       u32 DMA_T0 = get_dma_t0_offset();
-       u32 DMA_STS0 = get_dma_sts0_offset();
 
-       pr_err("begin %s\n", msg);
-       for (i = 0; i < 1; i++)
-               pr_err("reg(%lu) = 0x%8x\n", (uintptr_t)(DMA_T0 + i),
-                               aml_read_crypto_reg(DMA_T0 + i));
-       for (i = 0; i < 1; i++)
-               pr_err("reg(%lu) = 0x%8x\n", (uintptr_t)(DMA_STS0 + i),
-                               aml_read_crypto_reg(DMA_STS0 + i));
+       pr_debug("begin %s\n", msg);
+       pr_debug("reg(%u) = 0x%8x\n", thread,
+                       aml_read_crypto_reg(thread));
+       pr_debug("reg(%u) = 0x%8x\n", status,
+                       aml_read_crypto_reg(status));
        for (i = 0; i < nents; i++) {
-               pr_err("desc (%4x) (len) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (len) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.length);
-               pr_err("desc (%4x) (irq) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (irq) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.irq);
-               pr_err("desc (%4x) (eoc) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (eoc) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.eoc);
-               pr_err("desc (%4x) (lop) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (lop) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.loop);
-               pr_err("desc (%4x) (mod) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (mod) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.mode);
-               pr_err("desc (%4x) (beg) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (beg) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.begin);
-               pr_err("desc (%4x) (end) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (end) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.end);
-               pr_err("desc (%4x) (opm) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (opm) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.op_mode);
-               pr_err("desc (%4x) (enc) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (enc) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.enc_sha_only);
-               pr_err("desc (%4x) (blk) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (blk) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.block);
-               pr_err("desc (%4x) (err) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (err) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.error);
-               pr_err("desc (%4x) (own) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (own) = 0x%8x\n", i,
                                dsc[i].dsc_cfg.b.owner);
-               pr_err("desc (%4x) (src) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (src) = 0x%8x\n", i,
                                dsc[i].src_addr);
-               pr_err("desc (%4x) (tgt) = 0x%8x\n", i,
+               pr_debug("desc (%4x) (tgt) = 0x%8x\n", i,
                                dsc[i].tgt_addr);
        }
-       pr_err("end %s\n", msg);
-#endif
+       pr_debug("end %s\n", msg);
 }
+EXPORT_SYMBOL_GPL(aml_dma_debug);
index 3c845a1..2b342b2 100644 (file)
@@ -19,8 +19,6 @@
 #define _AML_CRYPTO_H_
 #include <linux/io.h>
 
-#define AML_CRYPTO_DEBUG    0
-
  /* Reserved 4096 bytes and table is 12 bytes each */
 #define MAX_NUM_TABLES 341
 
@@ -90,12 +88,6 @@ enum TXLX_DMA_REG_OFFSETS {
 #define MODE_TDES_2K 0xe
 #define MODE_TDES_3K 0xf
 
-/* Thread 2, 3 are for secure threads */
-#define AES_THREAD_INDEX 0
-#define TDES_THREAD_INDEX 0
-#define SHA_THREAD_INDEX 0
-#define HMAC_THREAD_INDEX 0
-
 struct dma_dsc {
        union {
                uint32_t d32;
@@ -118,14 +110,22 @@ struct dma_dsc {
        uint32_t tgt_addr;
 };
 
-extern void __iomem *cryptoreg_offset;
-extern u32 secure_cryptoreg_offset;
+struct aml_dma_dev {
+       spinlock_t dma_lock;
+       uint32_t thread;
+       uint32_t status;
+       int     irq;
+       uint8_t dma_busy;
+};
 
 u32 swap_ulong32(u32 val);
 void aml_write_crypto_reg(u32 addr, u32 data);
 u32 aml_read_crypto_reg(u32 addr);
-void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg);
+void aml_dma_debug(struct dma_dsc *dsc, u32 nents, const char *msg,
+               u32 thread, u32 status);
 
 u32 get_dma_t0_offset(void);
 u32 get_dma_sts0_offset(void);
+
+extern void __iomem *cryptoreg;
 #endif
diff --git a/drivers/amlogic/crypto/aml-dma.c b/drivers/amlogic/crypto/aml-dma.c
new file mode 100644 (file)
index 0000000..1f77fde
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * drivers/amlogic/crypto/aml-dma.c
+ *
+ * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/cryptohash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <linux/of_platform.h>
+#include "aml-crypto-dma.h"
+
+void __iomem *cryptoreg;
+
+struct meson_dma_data {
+       uint32_t thread;
+       uint32_t status;
+};
+
+struct meson_dma_data meson_gxl_data = {
+       .thread = GXL_DMA_T0,
+       .status = GXL_DMA_STS0,
+};
+
+struct meson_dma_data meson_txlx_data = {
+       .thread = TXLX_DMA_T0,
+       .status = TXLX_DMA_STS0,
+};
+#ifdef CONFIG_OF
+static const struct of_device_id aml_dma_dt_match[] = {
+       {       .compatible = "amlogic,aml_gxl_dma",
+               .data = &meson_gxl_data,
+       },
+       {       .compatible = "amlogic,aml_txlx_dma",
+               .data = &meson_txlx_data,
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, aml_dma_dt_match);
+#else
+#define aml_dma_dt_match NULL
+#endif
+
+static int aml_dma_probe(struct platform_device *pdev)
+{
+       struct aml_dma_dev *dma_dd;
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct resource *res_base = 0;
+       struct resource *res_irq = 0;
+       const struct of_device_id *match;
+       int err = -EPERM;
+       const struct meson_dma_data *priv_data;
+
+       dma_dd = kzalloc(sizeof(struct aml_dma_dev), GFP_KERNEL);
+       if (dma_dd == NULL) {
+               err = -ENOMEM;
+               goto dma_err;
+       }
+
+       match = of_match_device(aml_dma_dt_match, &pdev->dev);
+       priv_data = match->data;
+       dma_dd->thread = priv_data->thread;
+       dma_dd->status = priv_data->status;
+       res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res_base) {
+               dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
+               goto dma_err;
+       } else {
+               cryptoreg = ioremap(res_base->start,
+                               resource_size(res_base));
+               if (!cryptoreg) {
+                       dev_err(dev, "failed to remap crypto reg\n");
+                       goto dma_err;
+               }
+       }
+
+       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       dma_dd->irq = res_irq->start;
+       dma_dd->dma_busy = 0;
+       platform_set_drvdata(pdev, dma_dd);
+       dev_info(dev, "Aml dma\n");
+
+       err = of_platform_populate(np, NULL, NULL, dev);
+
+       if (err != 0)
+               iounmap(cryptoreg);
+
+       return err;
+
+dma_err:
+       dev_err(dev, "initialization failed.\n");
+
+       return err;
+}
+
+static int aml_dma_remove(struct platform_device *pdev)
+{
+       struct aml_dma_dev *dma_dd;
+
+       dma_dd = platform_get_drvdata(pdev);
+       if (!dma_dd)
+               return -ENODEV;
+
+       iounmap(cryptoreg);
+       kfree(dma_dd);
+
+       return 0;
+}
+
+static struct platform_driver aml_dma_driver = {
+       .probe          = aml_dma_probe,
+       .remove         = aml_dma_remove,
+       .driver         = {
+               .name   = "aml_dma",
+               .owner  = THIS_MODULE,
+               .of_match_table = aml_dma_dt_match,
+       },
+};
+
+module_platform_driver(aml_dma_driver);
+
+MODULE_DESCRIPTION("Aml crypto DMA support.");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("matthew.shyu <matthew.shyu@amlogic.com>");
index 4537671..70851e4 100644 (file)
@@ -41,7 +41,6 @@
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 #include <linux/amlogic/iomap.h>
-#include <linux/amlogic/cpu_version.h>
 #include "aml-crypto-dma.h"
 
 /* SHA flags */
 
 #define SHA_BUFFER_LEN         PAGE_SIZE
 
-#define DMA_THREAD_REG (get_dma_t0_offset() + SHA_THREAD_INDEX)
-#define DMA_STATUS_REG (get_dma_sts0_offset() + SHA_THREAD_INDEX)
-u8 map_in_sha_dma;
-
 struct aml_sha_dev;
 
 struct aml_sha_reqctx {
@@ -112,7 +107,9 @@ struct aml_sha_dev {
        struct device           *dev;
        int                     irq;
 
-       spinlock_t              lock;
+       struct aml_dma_dev      *dma;
+       uint32_t thread;
+       uint32_t status;
        int                     err;
        struct tasklet_struct   done_task;
 
@@ -187,10 +184,7 @@ static int aml_sha_init(struct ahash_request *req)
 
        ctx->flags = 0;
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev, "init: digest size: %d\n",
-               crypto_ahash_digestsize(tfm));
-#endif
+       pr_debug("init: digest size: %d\n", crypto_ahash_digestsize(tfm));
 
        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
@@ -228,10 +222,8 @@ static int aml_sha_xmit_dma(struct aml_sha_dev *dd, struct dma_dsc *dsc,
        struct aml_sha_reqctx *ctx = ahash_request_ctx(dd->req);
        size_t length = 0;
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, nents: %u, final: %d\n",
+       pr_debug("xmit_dma: digcnt: 0x%llx 0x%llx, nents: %u, final: %d\n",
                ctx->digcnt[1], ctx->digcnt[0], nents, final);
-#endif
 
        mode = MODE_SHA1;
 
@@ -274,7 +266,13 @@ static int aml_sha_xmit_dma(struct aml_sha_dev *dd, struct dma_dsc *dsc,
        }
        ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
                        PAGE_SIZE, DMA_FROM_DEVICE);
-       aml_dma_debug(dsc, nents, __func__);
+       if (dma_mapping_error(dd->dev, ctx->dma_descript_tab)) {
+               dev_err(dd->dev, "mapping descriptor failed\n");
+               dma_unmap_single(dd->dev, ctx->hash_addr,
+                               SHA256_DIGEST_SIZE, DMA_FROM_DEVICE);
+               return -EINVAL;
+       }
+       aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
        /* should be non-zero before next lines to disable clocks later */
        for (i = 0; i < nents; i++) {
                length = dsc->dsc_cfg.b.length;
@@ -288,13 +286,11 @@ static int aml_sha_xmit_dma(struct aml_sha_dev *dd, struct dma_dsc *dsc,
 
        dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev,
-               "xmit before : digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
+       pr_debug("xmit before : digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
                ctx->digcnt[1], ctx->digcnt[0], length, final);
-#endif
+
        /* Start DMA transfer */
-       aml_write_crypto_reg(DMA_THREAD_REG,
+       aml_write_crypto_reg(dd->thread,
                        (uintptr_t) ctx->dma_descript_tab | 2);
 
        return -EINPROGRESS;
@@ -341,11 +337,8 @@ static int aml_sha_update_dma_slow(struct aml_sha_dev *dd)
 
        final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev,
-       "slow: bufcnt: %zd, digcnt: 0x%llx 0x%llx, final: %d, total: %u\n",
+       pr_debug("slow: bufcnt: %zd, digcnt: 0x%llx 0x%llx, final: %d, total: %u\n",
         ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final, ctx->total);
-#endif
 
        if (IS_ALIGNED(ctx->bufcnt, ctx->block_size) || final) {
                count = ctx->bufcnt;
@@ -362,10 +355,8 @@ static int aml_sha_update_dma_start(struct aml_sha_dev *dd)
        struct scatterlist *sg;
        struct dma_dsc *dsc = ctx->descriptor;
 
-#if AML_CRYPTO_DEBUG
-               dev_info(dd->dev, "start: total: %u, fast_nents: %u offset: %u\n",
-                               ctx->total, ctx->fast_nents, ctx->offset);
-#endif
+       pr_debug("start: total: %u, fast_nents: %u offset: %u\n",
+                       ctx->total, ctx->fast_nents, ctx->offset);
 
        if (!ctx->total)
                return 0;
@@ -393,12 +384,9 @@ static int aml_sha_update_dma_start(struct aml_sha_dev *dd)
        sg = ctx->sg;
 
        while (ctx->total && ctx->fast_nents < MAX_NUM_TABLES && sg) {
-#if AML_CRYPTO_DEBUG
-               dev_info(dd->dev,
-               "fast: dig: 0x%llx 0x%llx, bufcnt: %zd, total: %u, sglen: %u\n",
+               pr_debug("fast: dig: 0x%llx 0x%llx, bufcnt: %zd, total: %u, sglen: %u\n",
                ctx->digcnt[1], ctx->digcnt[0],
                ctx->bufcnt, ctx->total, ctx->sg->length);
-#endif
 
                length = min(ctx->total, sg->length);
 
@@ -423,10 +411,8 @@ static int aml_sha_update_dma_start(struct aml_sha_dev *dd)
                sg = sg_next(sg);
                ctx->fast_nents++;
 
-#if AML_CRYPTO_DEBUG
-               dev_info(dd->dev, "fast: total: %u, offset: %u, tail: %u\n",
+               pr_debug("fast: total: %u, offset: %u, tail: %u\n",
                                ctx->total, ctx->offset, tail);
-#endif
 
                if (tail)
                        break;
@@ -457,15 +443,11 @@ static int aml_sha_update_dma_stop(struct aml_sha_dev *dd)
 static int aml_sha_update_req(struct aml_sha_dev *dd)
 {
        int err;
-#if AML_CRYPTO_DEBUG
        struct ahash_request *req = dd->req;
        struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
-#endif
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
+       pr_debug("update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
                ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
-#endif
 
        err = aml_sha_update_dma_start(dd);
 
@@ -478,9 +460,7 @@ static int aml_sha_final_req(struct aml_sha_dev *dd)
 
        err = aml_sha_update_dma_slow(dd);
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev, "final_req: err: %d\n", err);
-#endif
+       pr_debug("final_req: err: %d\n", err);
 
        return err;
 }
@@ -521,11 +501,22 @@ static int aml_sha_finish_hmac(struct ahash_request *req)
 
        ctx->hash_addr = dma_map_single(dd->dev, ctx->digest,
                        SHA256_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dd->dev, ctx->hash_addr)) {
+               dev_err(dd->dev, "mapping hash addr failed: %d\n",
+                               SHA256_DIGEST_SIZE);
+               return -EINVAL;
+       }
 
-       key = kmalloc(tctx->keylen, GFP_KERNEL);
+       key = kmalloc(tctx->keylen, GFP_ATOMIC);
        memcpy(key, tctx->key, tctx->keylen);
        dma_key = dma_map_single(dd->dev, key,
                        tctx->keylen, DMA_TO_DEVICE);
+       if (dma_mapping_error(dd->dev, dma_key)) {
+               dev_err(dd->dev, "mapping key addr failed: %d\n", tctx->keylen);
+               dma_unmap_single(dd->dev, ctx->hash_addr,
+                               SHA256_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+               return -EINVAL;
+       }
 
        mode = MODE_SHA1;
        if (ctx->flags & SHA_FLAGS_SHA224)
@@ -566,12 +557,21 @@ static int aml_sha_finish_hmac(struct ahash_request *req)
 
        ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
                        PAGE_SIZE, DMA_TO_DEVICE);
-       aml_dma_debug(dsc, 2, __func__);
-       aml_write_crypto_reg(DMA_THREAD_REG,
+       if (dma_mapping_error(dd->dev, ctx->dma_descript_tab)) {
+               dev_err(dd->dev, "mapping descriptor failed\n");
+               dma_unmap_single(dd->dev, dma_key,
+                               tctx->keylen, DMA_TO_DEVICE);
+               dma_unmap_single(dd->dev, ctx->hash_addr,
+                               SHA256_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+               return -EINVAL;
+       }
+
+       aml_dma_debug(dsc, 2, __func__, dd->thread, dd->status);
+       aml_write_crypto_reg(dd->thread,
                        (uintptr_t) ctx->dma_descript_tab | 2);
-       while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+       while (aml_read_crypto_reg(dd->status) == 0)
                ;
-       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+       aml_write_crypto_reg(dd->status, 0xf);
        dma_unmap_single(dd->dev, dma_key,
                        tctx->keylen, DMA_TO_DEVICE);
        dma_unmap_single(dd->dev, ctx->hash_addr,
@@ -587,9 +587,6 @@ static int aml_sha_finish(struct ahash_request *req)
        struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
        struct aml_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        int err = 0;
-#if AML_CRYPTO_DEBUG
-       struct aml_sha_dev *dd = ctx->dd;
-#endif
 
        if (ctx->digcnt[0] || ctx->digcnt[1]) {
                if (tctx->flags & SHA_FLAGS_HMAC)
@@ -597,10 +594,8 @@ static int aml_sha_finish(struct ahash_request *req)
                aml_sha_copy_ready_hash(req);
        }
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev, "finish digcnt: 0x%llx 0x%llx, bufcnt: %zd\n",
+       pr_debug("finish digcnt: 0x%llx 0x%llx, bufcnt: %zd\n",
                        ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt);
-#endif
 
        return err;
 }
@@ -618,6 +613,10 @@ static void aml_sha_finish_req(struct ahash_request *req, int err)
        }
 
        /* atomic operation is not needed here */
+       if (dd->flags & SHA_FLAGS_BUSY) {
+               dd->flags &= ~SHA_FLAGS_BUSY;
+               dd->dma->dma_busy = 0;
+       }
        dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL |
                        SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
 
@@ -645,57 +644,51 @@ static void aml_sha_state_restore(struct ahash_request *req)
        struct aml_sha_dev *dd = tctx->dd;
        dma_addr_t dma_ctx;
        struct dma_dsc *dsc = ctx->descriptor;
-       unsigned long flags;
+       uint32_t i = 0;
+       int32_t len = sizeof(tctx->state);
 
        if (!ctx->digcnt[0] && !ctx->digcnt[1])
                return;
 
-       if (!cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
-               return;
-
-       spin_lock_irqsave(&dd->lock, flags);
        dma_ctx = dma_map_single(dd->dev, tctx->state,
                        sizeof(tctx->state), DMA_TO_DEVICE);
+       if (dma_mapping_error(dd->dev, dma_ctx)) {
+               dev_err(dd->dev, "mapping dma_ctx failed\n");
+               return;
+       }
 
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
-               uint32_t i = 0;
-               int32_t len = sizeof(tctx->state);
-               while (len > 0) {
-                       dsc[i].src_addr = (uint32_t)dma_ctx + i * 16;
-                       dsc[i].tgt_addr = i * 16;
-                       dsc[i].dsc_cfg.d32 = 0;
-                       dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
-                       dsc[i].dsc_cfg.b.mode = MODE_KEY;
-                       dsc[i].dsc_cfg.b.eoc = 0;
-                       dsc[i].dsc_cfg.b.owner = 1;
-                       i++;
-                       len -= 16;
-               }
-               dsc[i - 1].dsc_cfg.b.eoc = 1;
-       } else {
-               dsc->src_addr = (uint32_t)dma_ctx;
-               dsc->tgt_addr = 0;
-               dsc->dsc_cfg.d32 = 0;
-               dsc->dsc_cfg.b.length = sizeof(tctx->state);
-               dsc->dsc_cfg.b.mode = MODE_KEY;
-               dsc->dsc_cfg.b.eoc = 1;
-               dsc->dsc_cfg.b.owner = 1;
+       while (len > 0) {
+               dsc[i].src_addr = (uint32_t)dma_ctx + i * 16;
+               dsc[i].tgt_addr = i * 16;
+               dsc[i].dsc_cfg.d32 = 0;
+               dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
+               dsc[i].dsc_cfg.b.mode = MODE_KEY;
+               dsc[i].dsc_cfg.b.eoc = 0;
+               dsc[i].dsc_cfg.b.owner = 1;
+               i++;
+               len -= 16;
        }
+       dsc[i - 1].dsc_cfg.b.eoc = 1;
 
        ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
                        PAGE_SIZE, DMA_TO_DEVICE);
-       aml_write_crypto_reg(DMA_THREAD_REG,
+       if (dma_mapping_error(dd->dev, ctx->dma_descript_tab)) {
+               dev_err(dd->dev, "mapping descript tab failed\n");
+               dma_unmap_single(dd->dev, dma_ctx,
+                               sizeof(tctx->state), DMA_TO_DEVICE);
+               return;
+       }
+       aml_write_crypto_reg(dd->thread,
                        (uintptr_t) ctx->dma_descript_tab | 2);
-       aml_dma_debug(dsc, 1, __func__);
-       while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+       aml_dma_debug(dsc, 1, __func__, dd->thread, dd->status);
+       while (aml_read_crypto_reg(dd->status) == 0)
                ;
-       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+       aml_write_crypto_reg(dd->status, 0xf);
        dma_unmap_single(dd->dev, dma_ctx,
                        sizeof(tctx->state), DMA_TO_DEVICE);
        dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
                        DMA_FROM_DEVICE);
 
-       spin_unlock_irqrestore(&dd->lock, flags);
 }
 
 static int aml_sha_handle_queue(struct aml_sha_dev *dd,
@@ -706,21 +699,23 @@ static int aml_sha_handle_queue(struct aml_sha_dev *dd,
        unsigned long flags;
        int err = 0, ret = 0;
 
-       spin_lock_irqsave(&dd->lock, flags);
+       spin_lock_irqsave(&dd->dma->dma_lock, flags);
        if (req)
                ret = ahash_enqueue_request(&dd->queue, req);
 
-       if (SHA_FLAGS_BUSY & dd->flags) {
-               spin_unlock_irqrestore(&dd->lock, flags);
+       if (SHA_FLAGS_BUSY & dd->flags || dd->dma->dma_busy) {
+               spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
                return ret;
        }
 
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
-       if (async_req)
+       if (async_req) {
                dd->flags |= SHA_FLAGS_BUSY;
+               dd->dma->dma_busy = 1;
+       }
 
-       spin_unlock_irqrestore(&dd->lock, flags);
+       spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
 
        if (!async_req)
                return ret;
@@ -732,10 +727,8 @@ static int aml_sha_handle_queue(struct aml_sha_dev *dd,
        dd->req = req;
        ctx = ahash_request_ctx(req);
 
-#if AML_CRYPTO_DEBUG
-       dev_info(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
+       pr_debug("handling new req, op: %lu, nbytes: %d\n",
                                                ctx->op, req->nbytes);
-#endif
 
        err = aml_sha_hw_init(dd);
        if (err)
@@ -826,61 +819,10 @@ static int aml_sha_digest(struct ahash_request *req)
 
 static int aml_sha_import(struct ahash_request *req, const void *in)
 {
-       struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
        struct aml_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        const struct aml_sha_ctx *ictx = in;
-       struct aml_sha_dev *dd = tctx->dd;
-       dma_addr_t dma_ctx;
-       struct dma_dsc *dsc = ctx->descriptor;
-       unsigned long flags;
-
-       if (!cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
-               return -ENOTSUPP;
 
-       spin_lock_irqsave(&dd->lock, flags);
        memcpy(tctx->state, ictx->state, sizeof(tctx->state));
-       dma_ctx = dma_map_single(dd->dev, tctx->state,
-                       sizeof(tctx->state), DMA_TO_DEVICE);
-
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
-               uint32_t i = 0;
-               int32_t len = sizeof(tctx->state);
-               while (len > 0) {
-                       dsc[i].src_addr = (uint32_t)dma_ctx + i * 16;
-                       dsc[i].tgt_addr = i * 16;
-                       dsc[i].dsc_cfg.d32 = 0;
-                       dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
-                       dsc[i].dsc_cfg.b.mode = MODE_KEY;
-                       dsc[i].dsc_cfg.b.eoc = 0;
-                       dsc[i].dsc_cfg.b.owner = 1;
-                       i++;
-                       len -= 16;
-               }
-               dsc[i - 1].dsc_cfg.b.eoc = 1;
-       } else {
-               dsc->src_addr = (uint32_t)dma_ctx;
-               dsc->tgt_addr = 0;
-               dsc->dsc_cfg.d32 = 0;
-               dsc->dsc_cfg.b.length = sizeof(tctx->state);
-               dsc->dsc_cfg.b.mode = MODE_KEY;
-               dsc->dsc_cfg.b.eoc = 1;
-               dsc->dsc_cfg.b.owner = 1;
-       }
-
-       ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
-                       PAGE_SIZE, DMA_TO_DEVICE);
-       aml_write_crypto_reg(DMA_THREAD_REG,
-                       (uintptr_t) ctx->dma_descript_tab | 2);
-       aml_dma_debug(dsc, 1, __func__);
-       while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
-               ;
-       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
-       dma_unmap_single(dd->dev, dma_ctx,
-                       sizeof(tctx->state), DMA_TO_DEVICE);
-       dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
-                       DMA_FROM_DEVICE);
-
-       spin_unlock_irqrestore(&dd->lock, flags);
        return 0;
 }
 
@@ -888,15 +830,8 @@ static int aml_sha_export(struct ahash_request *req, void *out)
 {
        struct aml_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct aml_sha_ctx *octx = out;
-       struct aml_sha_dev *dd = tctx->dd;
-       unsigned long flags;
-
-       if (!cpu_after_eq(MESON_CPU_MAJOR_ID_TXLX))
-               return -ENOTSUPP;
 
-       spin_lock_irqsave(&dd->lock, flags);
        memcpy(octx->state, tctx->state, sizeof(tctx->state));
-       spin_unlock_irqrestore(&dd->lock, flags);
        return 0;
 }
 
@@ -907,11 +842,11 @@ static int aml_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
        struct aml_sha_dev *dd = 0;
        struct aml_sha_dev *tmp = 0;
        struct dma_dsc *dsc = 0;
-       struct aml_sha_reqctx *ctx = 0;
        uint32_t bs = 0;
        uint32_t ds = 0;
        int err = 0;
        dma_addr_t dma_key = 0;
+       dma_addr_t dma_descript_tab = 0;
        uint8_t *key_raw = 0;
        uint32_t mode = MODE_SHA1;
        uint32_t map_len = 0;
@@ -927,8 +862,10 @@ static int aml_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
        } else {
                dd = tctx->dd;
        }
-       ctx = ahash_request_ctx(dd->req);
-       dsc = ctx->descriptor;
+
+       dsc = kmalloc_array(2, sizeof(struct dma_dsc), GFP_KERNEL);
+       if (!dsc)
+               return -ENOMEM;
 
        spin_unlock_bh(&aml_sha.lock);
 
@@ -961,6 +898,11 @@ static int aml_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
        map_len = keylen > bs ? keylen : bs;
        dma_key = dma_map_single(dd->dev, key_raw,
                        map_len, DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(dd->dev, dma_key)) {
+               dev_err(dd->dev, "mapping dma_key failed\n");
+               return -EINVAL;
+       }
+
        if (keylen > bs) {
                dsc[0].src_addr = (uintptr_t)dma_key;
                dsc[0].tgt_addr = (uintptr_t)dma_key;
@@ -990,17 +932,24 @@ static int aml_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
        dsc[ipad].dsc_cfg.b.eoc = 1;
        dsc[ipad].dsc_cfg.b.owner = 1;
 
-       ctx->dma_descript_tab = dma_map_single(dd->dev, ctx->descriptor,
-                       PAGE_SIZE, DMA_TO_DEVICE);
-       aml_dma_debug(dsc, ipad + 1, __func__);
-       aml_write_crypto_reg(DMA_THREAD_REG,
-                       (uintptr_t) ctx->dma_descript_tab | 2);
-       while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+       dma_descript_tab = dma_map_single(dd->dev, dsc,
+                       sizeof(struct dma_dsc) * 2, DMA_TO_DEVICE);
+       if (dma_mapping_error(dd->dev, dma_descript_tab)) {
+               dev_err(dd->dev, "mapping descript_tab failed\n");
+               dma_unmap_single(dd->dev, dma_key,
+                               map_len, DMA_BIDIRECTIONAL);
+               return -EINVAL;
+       }
+
+       aml_dma_debug(dsc, ipad + 1, __func__, dd->thread, dd->status);
+       aml_write_crypto_reg(dd->thread,
+                       (uintptr_t) dma_descript_tab | 2);
+       while (aml_read_crypto_reg(dd->status) == 0)
                ;
-       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+       aml_write_crypto_reg(dd->status, 0xf);
        dma_unmap_single(dd->dev, dma_key,
                        map_len, DMA_BIDIRECTIONAL);
-       dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
+       dma_unmap_single(dd->dev, dma_descript_tab, sizeof(struct dma_dsc) * 2,
                        DMA_FROM_DEVICE);
        tctx->keylen = keylen;
        memcpy(tctx->key, key_raw, keylen);
@@ -1053,7 +1002,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "sha1",
                                .cra_driver_name  = "aml-sha1",
-                               .cra_priority     = 300,
+                               .cra_priority     = 100,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA1_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1078,7 +1027,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "sha256",
                                .cra_driver_name  = "aml-sha256",
-                               .cra_priority     = 300,
+                               .cra_priority     = 100,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA256_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1103,7 +1052,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "sha224",
                                .cra_driver_name  = "aml-sha224",
-                               .cra_priority     = 300,
+                               .cra_priority     = 100,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA224_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1129,7 +1078,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "hmac(sha1)",
                                .cra_driver_name  = "aml-hmac-sha1",
-                               .cra_priority     = 300,
+                               .cra_priority     = 100,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA1_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1155,7 +1104,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "hmac(sha224)",
                                .cra_driver_name  = "aml-hmac-sha224",
-                               .cra_priority     = 300,
+                               .cra_priority     = 100,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA224_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1181,7 +1130,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "hmac(sha256)",
                                .cra_driver_name  = "aml-hmac-sha256",
-                               .cra_priority     = 300,
+                               .cra_priority     = 100,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA256_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1208,7 +1157,7 @@ static void aml_sha_done_task(unsigned long data)
        dma_unmap_single(dd->dev, ctx->dma_descript_tab, PAGE_SIZE,
                        DMA_FROM_DEVICE);
        aml_dma_debug(ctx->descriptor, ctx->fast_nents ?
-                       ctx->fast_nents : 1, __func__);
+                       ctx->fast_nents : 1, __func__, dd->thread, dd->status);
 
        if (SHA_FLAGS_DMA_READY & dd->flags) {
                if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
@@ -1238,7 +1187,7 @@ finish:
 static irqreturn_t aml_sha_irq(int irq, void *dev_id)
 {
        struct aml_sha_dev *sha_dd = dev_id;
-       uint8_t status = aml_read_crypto_reg(DMA_STATUS_REG);
+       uint8_t status = aml_read_crypto_reg(sha_dd->status);
 
        if (status) {
                if (status == 0x1)
@@ -1246,7 +1195,7 @@ static irqreturn_t aml_sha_irq(int irq, void *dev_id)
                if (SHA_FLAGS_DMA_ACTIVE & sha_dd->flags) {
                        sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
                        sha_dd->flags |= SHA_FLAGS_DMA_READY;
-                       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+                       aml_write_crypto_reg(sha_dd->status, 0xf);
                        tasklet_schedule(&sha_dd->done_task);
                        return IRQ_HANDLED;
                } else {
@@ -1286,8 +1235,6 @@ static int aml_sha_probe(struct platform_device *pdev)
 {
        struct aml_sha_dev *sha_dd;
        struct device *dev = &pdev->dev;
-       struct resource *res_irq = 0;
-       struct resource *res_base = 0;
        int err = -EPERM;
 
        sha_dd = kzalloc(sizeof(struct aml_sha_dev), GFP_KERNEL);
@@ -1297,32 +1244,19 @@ static int aml_sha_probe(struct platform_device *pdev)
        }
 
        sha_dd->dev = dev;
+       sha_dd->dma = dev_get_drvdata(dev->parent);
+       sha_dd->thread = sha_dd->dma->thread;
+       sha_dd->status = sha_dd->dma->status;
+       sha_dd->irq = sha_dd->dma->irq;
 
        platform_set_drvdata(pdev, sha_dd);
 
-
-       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, SHA_THREAD_INDEX);
-
-       res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res_base) {
-               dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
-               goto sha_dd_err;
-       } else {
-               if (cryptoreg_offset) {
-                       cryptoreg_offset = ioremap(res_base->start,
-                                       resource_size(res_base));
-                       map_in_sha_dma = 1;
-               }
-       }
-
        INIT_LIST_HEAD(&sha_dd->list);
 
        tasklet_init(&sha_dd->done_task, aml_sha_done_task,
                                        (unsigned long)sha_dd);
 
        crypto_init_queue(&sha_dd->queue, AML_SHA_QUEUE_LENGTH);
-
-       sha_dd->irq = res_irq->start;
        err = request_irq(sha_dd->irq, aml_sha_irq, IRQF_SHARED, "aml-sha",
                                                sha_dd);
        if (err) {
@@ -1350,12 +1284,6 @@ err_algs:
        list_del(&sha_dd->list);
        spin_unlock(&aml_sha.lock);
 
-       if (map_in_sha_dma) {
-
-               iounmap(cryptoreg_offset);
-               map_in_sha_dma = 0;
-       }
-
        free_irq(sha_dd->irq, sha_dd);
 res_err:
        tasklet_kill(&sha_dd->done_task);
@@ -1382,12 +1310,6 @@ static int aml_sha_remove(struct platform_device *pdev)
 
        tasklet_kill(&sha_dd->done_task);
 
-       if (map_in_sha_dma) {
-
-               iounmap(cryptoreg_offset);
-               map_in_sha_dma = 0;
-       }
-
        if (sha_dd->irq >= 0)
                free_irq(sha_dd->irq, sha_dd);
 
index b3ada7d..87f6ce1 100644 (file)
@@ -40,7 +40,6 @@
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
 #include <linux/amlogic/iomap.h>
-#include <linux/amlogic/cpu_version.h>
 #include "aml-crypto-dma.h"
 
 /* TDES flags */
 
 #define AML_TDES_QUEUE_LENGTH  50
 
-#define DMA_THREAD_REG (get_dma_t0_offset() + TDES_THREAD_INDEX)
-#define DMA_STATUS_REG (get_dma_sts0_offset() + TDES_THREAD_INDEX)
-u8 map_in_tdes_dma;
-
 struct aml_tdes_dev;
 
 struct aml_tdes_ctx {
@@ -84,7 +79,9 @@ struct aml_tdes_dev {
        unsigned long           flags;
        int     err;
 
-       spinlock_t              lock;
+       struct aml_dma_dev      *dma;
+       uint32_t thread;
+       uint32_t status;
        struct crypto_queue     queue;
 
        struct tasklet_struct   done_task;
@@ -123,7 +120,7 @@ static struct aml_tdes_drv aml_tdes = {
        .lock = __SPIN_LOCK_UNLOCKED(aml_tdes.lock),
 };
 
-static void set_tdes_key_iv(struct aml_tdes_dev *dd,
+static int set_tdes_key_iv(struct aml_tdes_dev *dd,
                u32 *key, u32 keylen, u32 *iv)
 {
        struct dma_dsc *dsc = dd->descriptor;
@@ -131,6 +128,7 @@ static void set_tdes_key_iv(struct aml_tdes_dev *dd,
        uint32_t *piv = key_iv + 8;
        uint32_t len = keylen;
        dma_addr_t dma_addr_key;
+       uint32_t i = 0;
 
        memset(key_iv, 0, sizeof(key_iv));
        memcpy(key_iv, key, keylen);
@@ -139,43 +137,42 @@ static void set_tdes_key_iv(struct aml_tdes_dev *dd,
                len = 48; /* full key storage */
        }
 
+       if (!len)
+               return -EPERM;
+
        dma_addr_key = dma_map_single(dd->dev, key_iv,
                        sizeof(key_iv), DMA_TO_DEVICE);
 
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG)) {
-               uint32_t i = 0;
-               while (len > 0) {
-                       dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
-                       dsc[i].tgt_addr = i * 16;
-                       dsc[i].dsc_cfg.d32 = 0;
-                       dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
-                       dsc[i].dsc_cfg.b.mode = MODE_KEY;
-                       dsc[i].dsc_cfg.b.eoc = 0;
-                       dsc[i].dsc_cfg.b.owner = 1;
-                       i++;
-                       len -= 16;
-               }
-               dsc[i - 1].dsc_cfg.b.eoc = 1;
-       } else {
-               dsc->src_addr = (uintptr_t)dma_addr_key;
-               dsc->tgt_addr = 0;
-               dsc->dsc_cfg.d32 = 0;
-               dsc->dsc_cfg.b.length = len;
-               dsc->dsc_cfg.b.mode = MODE_KEY;
-               dsc->dsc_cfg.b.eoc = 1;
-               dsc->dsc_cfg.b.owner = 1;
+       if (dma_mapping_error(dd->dev, dma_addr_key)) {
+               dev_err(dd->dev, "error mapping dma_addr_key\n");
+               return -EINVAL;
+       }
+
+       while (len > 0) {
+               dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
+               dsc[i].tgt_addr = i * 16;
+               dsc[i].dsc_cfg.d32 = 0;
+               dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
+               dsc[i].dsc_cfg.b.mode = MODE_KEY;
+               dsc[i].dsc_cfg.b.eoc = 0;
+               dsc[i].dsc_cfg.b.owner = 1;
+               i++;
+               len -= 16;
        }
+       dsc[i - 1].dsc_cfg.b.eoc = 1;
 
        dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
                        PAGE_SIZE, DMA_TO_DEVICE);
-       aml_write_crypto_reg(DMA_THREAD_REG,
+       aml_write_crypto_reg(dd->thread,
                        (uintptr_t) dd->dma_descript_tab | 2);
-       aml_dma_debug(dsc, 1, __func__);
-       while (aml_read_crypto_reg(DMA_STATUS_REG) == 0)
+       aml_dma_debug(dsc, i, __func__, dd->thread, dd->status);
+       while (aml_read_crypto_reg(dd->status) == 0)
                ;
-       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+       aml_write_crypto_reg(dd->status, 0xf);
        dma_unmap_single(dd->dev, dma_addr_key,
                        sizeof(key_iv), DMA_TO_DEVICE);
+
+       return 0;
 }
 
 
@@ -294,6 +291,7 @@ static void aml_tdes_finish_req(struct aml_tdes_dev *dd, int err)
        struct ablkcipher_request *req = dd->req;
 
        dd->flags &= ~TDES_FLAGS_BUSY;
+       dd->dma->dma_busy = 0;
        req->base.complete(&req->base, err);
 }
 
@@ -322,8 +320,8 @@ static int aml_tdes_crypt_dma(struct aml_tdes_dev *dd, struct dma_dsc *dsc,
        dma_sync_single_for_device(dd->dev, dd->dma_descript_tab,
                        PAGE_SIZE, DMA_TO_DEVICE);
 
-       aml_dma_debug(dsc, nents, __func__);
-       aml_write_crypto_reg(DMA_THREAD_REG,
+       aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
+       aml_write_crypto_reg(dd->thread,
                        (uintptr_t) dd->dma_descript_tab | 2);
        return 0;
 }
@@ -388,10 +386,10 @@ static int aml_tdes_write_ctrl(struct aml_tdes_dev *dd)
                return err;
 
        if (dd->flags & TDES_FLAGS_CBC)
-               set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+               err = set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
                                dd->req->info);
        else
-               set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
+               err = set_tdes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
                                NULL);
 
        return err;
@@ -406,19 +404,21 @@ static int aml_tdes_handle_queue(struct aml_tdes_dev *dd,
        unsigned long flags;
        int err, ret = 0;
 
-       spin_lock_irqsave(&dd->lock, flags);
+       spin_lock_irqsave(&dd->dma->dma_lock, flags);
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
 
-       if (dd->flags & TDES_FLAGS_BUSY) {
-               spin_unlock_irqrestore(&dd->lock, flags);
+       if (dd->flags & TDES_FLAGS_BUSY || dd->dma->dma_busy) {
+               spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
-       if (async_req)
+       if (async_req) {
                dd->flags |= TDES_FLAGS_BUSY;
-       spin_unlock_irqrestore(&dd->lock, flags);
+               dd->dma->dma_busy = 1;
+       }
+       spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
 
        if (!async_req)
                return ret;
@@ -662,7 +662,7 @@ static void aml_tdes_cra_exit(struct crypto_tfm *tfm)
 {
 }
 
-static struct crypto_alg tdes_algs[] = {
+static struct crypto_alg des_algs[] = {
        {
                .cra_name        = "ecb(des)",
                .cra_driver_name = "ecb-des-aml",
@@ -686,7 +686,7 @@ static struct crypto_alg tdes_algs[] = {
        {
                .cra_name        =  "cbc(des)",
                .cra_driver_name =  "cbc-des-aml",
-               .cra_priority  =  300,
+               .cra_priority  =  100,
                .cra_flags     =  CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize =  DES_BLOCK_SIZE,
                .cra_ctxsize   =  sizeof(struct aml_tdes_ctx),
@@ -704,10 +704,13 @@ static struct crypto_alg tdes_algs[] = {
                        .decrypt        =    aml_tdes_cbc_decrypt,
                }
        },
+};
+
+static struct crypto_alg tdes_algs[] = {
        {
                .cra_name        = "ecb(des3_ede)",
                .cra_driver_name = "ecb-tdes-aml",
-               .cra_priority   = 300,
+               .cra_priority   = 100,
                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize  = DES_BLOCK_SIZE,
                .cra_ctxsize    = sizeof(struct aml_tdes_ctx),
@@ -727,7 +730,7 @@ static struct crypto_alg tdes_algs[] = {
        {
                .cra_name        = "cbc(des3_ede)",
                .cra_driver_name = "cbc-tdes-aml",
-               .cra_priority  = 300,
+               .cra_priority  = 100,
                .cra_flags     = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize   = sizeof(struct aml_tdes_ctx),
@@ -762,7 +765,7 @@ static void aml_tdes_done_task(unsigned long data)
        err = aml_tdes_crypt_dma_stop(dd);
 
        aml_dma_debug(dd->descriptor, dd->fast_nents ?
-                       dd->fast_nents : 1, __func__);
+                       dd->fast_nents : 1, __func__, dd->thread, dd->status);
        err = dd->err ? : err;
 
        if (dd->total && !err) {
@@ -790,13 +793,13 @@ static void aml_tdes_done_task(unsigned long data)
 static irqreturn_t aml_tdes_irq(int irq, void *dev_id)
 {
        struct aml_tdes_dev *tdes_dd = dev_id;
-       uint8_t status = aml_read_crypto_reg(DMA_STATUS_REG);
+       uint8_t status = aml_read_crypto_reg(tdes_dd->status);
 
        if (status) {
                if (status == 0x1)
                        pr_err("irq overwrite\n");
                if (TDES_FLAGS_DMA & tdes_dd->flags) {
-                       aml_write_crypto_reg(DMA_STATUS_REG, 0xf);
+                       aml_write_crypto_reg(tdes_dd->status, 0xf);
                        tasklet_schedule(&tdes_dd->done_task);
                        return IRQ_HANDLED;
                } else {
@@ -811,12 +814,8 @@ static void aml_tdes_unregister_algs(struct aml_tdes_dev *dd)
 {
        int i = 0;
 
-       /*
-        * AXG and beyond does not support DES
-        * and thus we start from 2
-        */
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG))
-               i = 2;
+       for (; i < ARRAY_SIZE(des_algs); i++)
+               crypto_unregister_alg(&des_algs[i]);
 
-       for (; i < ARRAY_SIZE(tdes_algs); i++)
+       for (i = 0; i < ARRAY_SIZE(tdes_algs); i++)
                crypto_unregister_alg(&tdes_algs[i]);
@@ -824,17 +823,16 @@ static void aml_tdes_unregister_algs(struct aml_tdes_dev *dd)
 
 static int aml_tdes_register_algs(struct aml_tdes_dev *dd)
 {
-       int err = 0, i = 0, j = 0;
+       int err = 0, i = 0, j = 0, k = 0;
 
-       /*
-        * AXG and beyond does not support DES
-        * and thus we start from 2
-        */
-       if (cpu_after_eq(MESON_CPU_MAJOR_ID_AXG))
-               i = 2;
+       for (; i < ARRAY_SIZE(des_algs); i++) {
+               err = crypto_register_alg(&des_algs[i]);
+               if (err)
+                       goto err_des_algs;
+       }
 
-       for (; i < ARRAY_SIZE(tdes_algs); i++) {
-               err = crypto_register_alg(&tdes_algs[i]);
+       for (; k < ARRAY_SIZE(tdes_algs); k++) {
+               err = crypto_register_alg(&tdes_algs[k]);
                if (err)
                        goto err_tdes_algs;
        }
@@ -842,9 +840,13 @@ static int aml_tdes_register_algs(struct aml_tdes_dev *dd)
        return 0;
 
 err_tdes_algs:
-       for (j = 0; j < i; j++)
+       for (j = 0; j < k; j++)
                crypto_unregister_alg(&tdes_algs[j]);
 
+err_des_algs:
+       for (j = 0; j < i; j++)
+               crypto_unregister_alg(&des_algs[j]);
+
        return err;
 }
 
@@ -852,8 +854,6 @@ static int aml_tdes_probe(struct platform_device *pdev)
 {
        struct aml_tdes_dev *tdes_dd;
        struct device *dev = &pdev->dev;
-       struct resource *res_irq = 0;
-       struct resource *res_base = 0;
        int err = -EPERM;
 
        tdes_dd = kzalloc(sizeof(struct aml_tdes_dev), GFP_KERNEL);
@@ -863,22 +863,12 @@ static int aml_tdes_probe(struct platform_device *pdev)
        }
 
        tdes_dd->dev = dev;
+       tdes_dd->dma = dev_get_drvdata(dev->parent);
+       tdes_dd->thread = tdes_dd->dma->thread;
+       tdes_dd->status = tdes_dd->dma->status;
+       tdes_dd->irq = tdes_dd->dma->irq;
 
        platform_set_drvdata(pdev, tdes_dd);
-       res_irq = platform_get_resource(pdev, IORESOURCE_IRQ,
-                       TDES_THREAD_INDEX);
-
-       res_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res_base) {
-               dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
-               goto tdes_dd_err;
-       } else {
-               if (!cryptoreg_offset) {
-                       cryptoreg_offset = ioremap(res_base->start,
-                               resource_size(res_base));
-                       map_in_tdes_dma = 1;
-               }
-       }
 
        INIT_LIST_HEAD(&tdes_dd->list);
 
@@ -888,8 +878,6 @@ static int aml_tdes_probe(struct platform_device *pdev)
                        (unsigned long)tdes_dd);
 
        crypto_init_queue(&tdes_dd->queue, AML_TDES_QUEUE_LENGTH);
-
-       tdes_dd->irq = res_irq->start;
        err = request_irq(tdes_dd->irq, aml_tdes_irq, IRQF_SHARED, "aml-tdes",
                        tdes_dd);
        if (err) {
@@ -926,11 +914,6 @@ err_tdes_buff:
        free_irq(tdes_dd->irq, tdes_dd);
 tdes_irq_err:
 
-       if (map_in_tdes_dma) {
-               iounmap(cryptoreg_offset);
-               map_in_tdes_dma = 0;
-       }
-
        tasklet_kill(&tdes_dd->done_task);
        tasklet_kill(&tdes_dd->queue_task);
        kfree(tdes_dd);
@@ -957,11 +940,6 @@ static int aml_tdes_remove(struct platform_device *pdev)
        tasklet_kill(&tdes_dd->done_task);
        tasklet_kill(&tdes_dd->queue_task);
 
-       if (map_in_tdes_dma) {
-               iounmap(cryptoreg_offset);
-               map_in_tdes_dma = 0;
-       }
-
        if (tdes_dd->irq > 0)
                free_irq(tdes_dd->irq, tdes_dd);