crypto: fix and enable aes dma on G12B [1/1]
authorMatthew Shyu <matthew.shyu@amlogic.com>
Mon, 17 Dec 2018 08:37:19 +0000 (16:37 +0800)
committerMatthew Shyu <matthew.shyu@amlogic.com>
Thu, 14 Mar 2019 02:54:20 +0000 (18:54 -0800)
PD# SWPL-4823

Problem:
1. After stack optimization, stack memory cannot be
mapped as DMA buffers, which caused crypto DMA to fail to
generate correct results.
2. crypto dma was not enabled on G12B

Solution:
1. Move key_iv buffer from stack to memory provided by kzalloc
2. Enable crypto dma on G12B
3. Replace module_param with debugfs
4. Replace pr_err with dev_err

Verify:
verified on G12B

Change-Id: I6de682e3d1fc141f8c6179c7d91f9b4bff165eae
Signed-off-by: Matthew Shyu <matthew.shyu@amlogic.com>
Signed-off-by: Mingyen Hung <mingyen.hung@amlogic.com>
Documentation/devicetree/bindings/crypto/aml-crypto.txt
arch/arm/boot/dts/amlogic/mesong12a.dtsi
arch/arm/boot/dts/amlogic/mesong12b.dtsi
arch/arm64/boot/dts/amlogic/mesong12a.dtsi
arch/arm64/boot/dts/amlogic/mesong12b.dtsi
drivers/amlogic/crypto/aml-aes-dma.c
drivers/amlogic/crypto/aml-crypto-dma.c
drivers/amlogic/crypto/aml-crypto-dma.h
drivers/amlogic/crypto/aml-dma.c
drivers/amlogic/crypto/aml-sha-dma.c
drivers/amlogic/crypto/aml-tdes-dma.c

index bf76d17..4d1cc94 100644 (file)
@@ -2,10 +2,12 @@
 
 These are the HW cryptographic accelerators found on Amlogic products.
 
+*For S805 series and S905 series
+
 * Advanced Encryption Standard (AES)
 
 Required properties:
-- compatible : Should be "amlogic,aes" for aes-128/192/256 or "amlogic,aes_g12a_dma" for aes-128/256
+- compatible : Should be "amlogic,aes" for aes-128/192/256
 - dev_name : Should be "aml_aes"
 - interrupts: Should contain the IRQ line for the AES.
 - resets: Should contain the clock to enable the module
@@ -25,10 +27,10 @@ aml_aes{
 
 Required properties:
 - compatible : Should be "amlogic,des,tdes".
-- dev_name : Should be "aml_aes"
+- dev_name : Should be "aml_tdes"
 - interrupts: Should contain the IRQ line for the TDES.
 - resets: Should contain the clock to enable the module
-- reg:  Should contain the base address of regs
+- reg: Should contain the base address of regs
 
 Example:
 aml_tdes{
@@ -40,29 +42,16 @@ aml_tdes{
                0x0 0xda832000 0x0 0xe4>;
 };
 
-* Secure Hash Algorithm (SHA1/SHA224/SHA256)
+********************************************************************************
 
-Required properties:
-- compatible : Should be "amlogic,sha".
-- dev_name : Should be "aml_sha"
-- interrupts: Should contain the IRQ line for the SHA.
-- resets: Should contain the clock to enable the module
-- reg:  Should contain the base address of regs
+* For S905X series and beyond
+* S905X series use gxl
+* T962X series use txlx
+* S905X2 series use g12a
 
-Example:
-aml_sha{
-       compatible = "amlogic,sha";
-       dev_name = "aml_sha";
-       interrupts = <0 36 1>;
-       resets = <&clock GCLK_IDX_BLK_MOV>;
-       reg = <0x0 0xc8832000 0x0 0x2c4
-               0x0 0xda832000 0x0 0xe4>;
-};
-
-* New DMA for GXL and beyond
 * Dma engine for crypto operations
 Required properties:
-- compatible : Should be "amlogic,aml_gxl_dma" or "amlogic,aml_txlx_dma".
+- compatible : Should be "amlogic,aml_gxl_dma" or "amlogic,aml_txlx_dma"
 - reg:  Should contain the base address of regs
 - interrupts: Should contain the IRQ line for DMA.
 
@@ -76,8 +65,9 @@ aml_dma {
 * Advanced Encryption Standard (AES)
 
 Required properties:
-- compatible : Should be "amlogic,aes".
-- dev_name : Should be "aml_aes"
+- compatible : Should be "amlogic,aes_dma" for aes-128/192/256
+                               or "amlogic,aes_g12a_dma" for aes-128/256
+- dev_name : Should be "aml_aes_dma"
 
 Example:
 aml_aes{
@@ -89,8 +79,9 @@ aml_aes{
 * Triple Data Encryption Standard (Triple DES)
 
 Required properties:
-- compatible : Should be "amlogic,des,tdes".
-- dev_name : Should be "aml_aes"
+- compatible : Should be "amlogic,des_dma,tdes_dma" for gxl
+                               or "amlogic,tdes_dma" for other series.
+- dev_name : Should be "aml_tdes_dma"
 
 Example:
 aml_tdes{
@@ -100,8 +91,8 @@ aml_tdes{
 * Secure Hash Algorithm (SHA1/SHA224/SHA256/HMAC)
 
 Required properties:
-- compatible : Should be "amlogic,sha".
-- dev_name : Should be "aml_sha"
+- compatible : Should be "amlogic,sha_dma".
+- dev_name : Should be "aml_sha_dma"
 
 Example:
 aml_sha{
index f6e6228..99493d9 100644 (file)
                        dev_name = "aml_sha_dma";
                        status = "okay";
                };
+
+               aml_tdes {
+                       compatible = "amlogic,tdes_dma";
+                       dev_name = "aml_tdes_dma";
+                       status = "okay";
+               };
        };
 
        rng {
index 40f4db2..558797f 100644 (file)
                        };
                };/* end of audiobus*/
 
+               aml_dma {
+                       compatible = "amlogic,aml_txlx_dma";
+                       reg = <0xff63e000 0x48>;
+                       interrupts = <0 180 1>;
+
+                       aml_aes {
+                               compatible = "amlogic,aes_g12a_dma";
+                               dev_name = "aml_aes_dma";
+                               status = "okay";
+                       };
+
+                       aml_sha {
+                               compatible = "amlogic,sha_dma";
+                               dev_name = "aml_sha_dma";
+                               status = "okay";
+                       };
+
+                       aml_tdes {
+                               compatible = "amlogic,tdes_dma";
+                               dev_name = "aml_tdes_dma";
+                               status = "okay";
+                       };
+               };
        }; /* end of soc*/
 
        remote:rc@0xff808040 {
index 8aea490..420dd20 100644 (file)
                        dev_name = "aml_sha_dma";
                        status = "okay";
                };
+
+               aml_tdes {
+                       compatible = "amlogic,tdes_dma";
+                       dev_name = "aml_tdes_dma";
+                       status = "okay";
+               };
        };
 
        rng {
index 60e39a4..40935e5 100644 (file)
                        };
                };/* end of audiobus*/
 
+               aml_dma {
+                       compatible = "amlogic,aml_txlx_dma";
+                       reg = <0x0 0xff63e000 0x0 0x48>;
+                       interrupts = <0 180 1>;
+
+                       aml_aes {
+                               compatible = "amlogic,aes_g12a_dma";
+                               dev_name = "aml_aes_dma";
+                               status = "okay";
+                       };
+
+                       aml_sha {
+                               compatible = "amlogic,sha_dma";
+                               dev_name = "aml_sha_dma";
+                               status = "okay";
+                       };
+
+                       aml_tdes {
+                               compatible = "amlogic,tdes_dma";
+                               dev_name = "aml_tdes_dma";
+                               status = "okay";
+                       };
+               };
        }; /* end of soc*/
 
        remote:rc@0xff808040 {
index b7ff805..6182b61 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 #include <linux/hw_random.h>
 #include <linux/platform_device.h>
+#include <linux/of_device.h>
 
 #include <linux/device.h>
 #include <linux/init.h>
@@ -39,7 +40,6 @@
 #include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
-#include <linux/amlogic/iomap.h>
 #include <linux/of_platform.h>
 #include <crypto/skcipher.h>
 #include "aml-crypto-dma.h"
@@ -135,14 +135,18 @@ static struct aml_aes_drv aml_aes = {
 static int set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
                uint32_t keylen, u32 *iv, uint8_t swap)
 {
+       struct device *dev = dd->dev;
        struct dma_dsc *dsc = dd->descriptor;
-       uint32_t key_iv[12];
+       uint32_t *key_iv = kzalloc(DMA_KEY_IV_BUF_SIZE, GFP_ATOMIC);
        uint32_t *piv = key_iv + 8;
        int32_t len = keylen;
        dma_addr_t dma_addr_key = 0;
        uint32_t i = 0;
 
-       memset(key_iv, 0, sizeof(key_iv));
+       if (!key_iv) {
+               dev_err(dev, "error allocating key_iv buffer\n");
+               return -EINVAL;
+       }
        memcpy(key_iv, key, keylen);
        if (iv) {
                if (swap) {
@@ -153,17 +157,16 @@ static int set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
                } else {
                        memcpy(piv, iv, 16);
                }
-               len = 48; /* full key storage */
        }
 
-       if (!len)
-               return -EPERM;
+       len = DMA_KEY_IV_BUF_SIZE; /* full key storage */
 
        dma_addr_key = dma_map_single(dd->dev, key_iv,
-                       sizeof(key_iv), DMA_TO_DEVICE);
+                       DMA_KEY_IV_BUF_SIZE, DMA_TO_DEVICE);
 
        if (dma_mapping_error(dd->dev, dma_addr_key)) {
-               dev_err(dd->dev, "error mapping dma_addr_key\n");
+               dev_err(dev, "error mapping dma_addr_key\n");
+               kfree(key_iv);
                return -EINVAL;
        }
 
@@ -189,8 +192,9 @@ static int set_aes_key_iv(struct aml_aes_dev *dd, u32 *key,
                ;
        aml_write_crypto_reg(dd->status, 0xf);
        dma_unmap_single(dd->dev, dma_addr_key,
-                       sizeof(key_iv), DMA_TO_DEVICE);
+                       DMA_KEY_IV_BUF_SIZE, DMA_TO_DEVICE);
 
+       kfree(key_iv);
        return 0;
 }
 
@@ -228,6 +232,7 @@ static size_t aml_aes_sg_copy(struct scatterlist **sg, size_t *offset,
 static size_t aml_aes_sg_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
                uint32_t *nents, size_t total)
 {
+       struct device *dev = dd->dev;
        size_t count = 0;
        size_t process = 0;
        size_t count_total = 0;
@@ -253,14 +258,14 @@ static size_t aml_aes_sg_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
        if (dd->in_sg != dd->out_sg) {
                err = dma_map_sg(dd->dev, dd->in_sg, *nents, DMA_TO_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_err(dev, "dma_map_sg() error\n");
                        return 0;
                }
 
                err = dma_map_sg(dd->dev, dd->out_sg, *nents,
                                DMA_FROM_DEVICE);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_err(dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, *nents,
                                        DMA_TO_DEVICE);
                        return 0;
@@ -269,7 +274,7 @@ static size_t aml_aes_sg_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
                err = dma_map_sg(dd->dev, dd->in_sg, *nents,
                                DMA_BIDIRECTIONAL);
                if (!err) {
-                       dev_err(dd->dev, "dma_map_sg() error\n");
+                       dev_err(dev, "dma_map_sg() error\n");
                        return 0;
                }
                dma_sync_sg_for_device(dd->dev, dd->in_sg,
@@ -465,6 +470,7 @@ static int aml_aes_write_ctrl(struct aml_aes_dev *dd)
 static int aml_aes_handle_queue(struct aml_aes_dev *dd,
                struct ablkcipher_request *req)
 {
+       struct device *dev = dd->dev;
        struct crypto_async_request *async_req, *backlog;
        struct aml_aes_ctx *ctx;
        struct aml_aes_reqctx *rctx;
@@ -516,7 +522,7 @@ static int aml_aes_handle_queue(struct aml_aes_dev *dd,
                        (dd->flags & AES_FLAGS_CTR))
                        err = aml_aes_crypt_dma_start(dd);
                else {
-                       pr_err("size %zd is not multiple of %d",
+                       dev_err(dev, "size %zd is not multiple of %d",
                                        dd->total, AML_AES_DMA_THRESHOLD);
                        err = -EINVAL;
                }
@@ -532,6 +538,7 @@ static int aml_aes_handle_queue(struct aml_aes_dev *dd,
 
 static int aml_aes_crypt_dma_stop(struct aml_aes_dev *dd)
 {
+       struct device *dev = dd->dev;
        int err = -EINVAL;
        size_t count;
 
@@ -567,7 +574,8 @@ static int aml_aes_crypt_dma_stop(struct aml_aes_dev *dd)
                                        dd->dma_size, 1);
                        if (count != dd->dma_size) {
                                err = -EINVAL;
-                               pr_err("not all data converted: %zu\n", count);
+                               dev_err(dev, "not all data converted: %zu\n",
+                                               count);
                        }
                        /* install IV for CBC */
                        if (dd->flags & AES_FLAGS_CBC) {
@@ -584,6 +592,7 @@ static int aml_aes_crypt_dma_stop(struct aml_aes_dev *dd)
 
 static int aml_aes_buff_init(struct aml_aes_dev *dd)
 {
+       struct device *dev = dd->dev;
        int err = -ENOMEM;
 
        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
@@ -593,7 +602,7 @@ static int aml_aes_buff_init(struct aml_aes_dev *dd)
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);
 
        if (!dd->buf_in || !dd->buf_out || !dd->descriptor) {
-               dev_err(dd->dev, "unable to alloc pages.\n");
+               dev_err(dev, "unable to alloc pages.\n");
                goto err_alloc;
        }
 
@@ -601,7 +610,7 @@ static int aml_aes_buff_init(struct aml_aes_dev *dd)
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                        dd->buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
+               dev_err(dev, "dma %zd bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }
@@ -609,7 +618,7 @@ static int aml_aes_buff_init(struct aml_aes_dev *dd)
        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                        dd->buflen, DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
+               dev_err(dev, "dma %zd bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }
@@ -618,7 +627,7 @@ static int aml_aes_buff_init(struct aml_aes_dev *dd)
                        PAGE_SIZE, DMA_TO_DEVICE);
 
        if (dma_mapping_error(dd->dev, dd->dma_descript_tab)) {
-               dev_err(dd->dev, "dma descriptor error\n");
+               dev_err(dev, "dma descriptor error\n");
                err = -EINVAL;
                goto err_map_descriptor;
        }
@@ -638,7 +647,7 @@ err_map_in:
        free_page((uintptr_t)dd->descriptor);
 err_alloc:
        if (err)
-               pr_err("error: %d\n", err);
+               dev_err(dev, "error: %d\n", err);
        return err;
 }
 
@@ -911,7 +920,6 @@ static struct crypto_alg aes_lite_algs[] = {
                .cra_u.ablkcipher = {
                        .min_keysize    =    AES_MIN_KEY_SIZE,
                        .max_keysize    =    AES_MAX_KEY_SIZE,
-                       .ivsize         =    AES_BLOCK_SIZE,
                        .setkey         =    aml_aes_lite_setkey,
                        .encrypt        =    aml_aes_ecb_encrypt,
                        .decrypt        =    aml_aes_ecb_decrypt,
@@ -997,6 +1005,7 @@ static void aml_aes_queue_task(unsigned long data)
 static void aml_aes_done_task(unsigned long data)
 {
        struct aml_aes_dev *dd = (struct aml_aes_dev *) data;
+       struct device *dev = dd->dev;
        int err;
 
        err = aml_aes_crypt_dma_stop(dd);
@@ -1014,7 +1023,7 @@ static void aml_aes_done_task(unsigned long data)
                                dd->in_sg = sg_next(dd->in_sg);
                                dd->out_sg = sg_next(dd->out_sg);
                                if (!dd->in_sg || !dd->out_sg) {
-                                       pr_err("aml-aes: sg invalid\n");
+                                       dev_err(dev, "aml-aes: sg invalid\n");
                                        err = -EINVAL;
                                        break;
                                }
@@ -1034,11 +1043,12 @@ static void aml_aes_done_task(unsigned long data)
 static irqreturn_t aml_aes_irq(int irq, void *dev_id)
 {
        struct aml_aes_dev *aes_dd = dev_id;
+       struct device *dev = aes_dd->dev;
        uint8_t status = aml_read_crypto_reg(aes_dd->status);
 
        if (status) {
                if (status == 0x1)
-                       pr_err("irq overwrite\n");
+                       dev_err(dev, "irq overwrite\n");
                if (aes_dd->dma->dma_busy == DMA_FLAG_MAY_OCCUPY)
                        return IRQ_HANDLED;
                if ((aes_dd->flags & AES_FLAGS_DMA) &&
@@ -1079,7 +1089,7 @@ static int aml_aes_register_algs(struct aml_aes_dev *dd,
 
 err_aes_algs:
        for (j = 0; j < i; j++)
-               crypto_unregister_alg(&aes_algs[j]);
+               crypto_unregister_alg(&(aes_info->algs[j]));
 
        return err;
 }
@@ -1092,7 +1102,7 @@ static int aml_aes_probe(struct platform_device *pdev)
        int err = -EPERM;
        const struct aml_aes_info *aes_info = NULL;
 
-       aes_dd = kzalloc(sizeof(struct aml_aes_dev), GFP_KERNEL);
+       aes_dd = devm_kzalloc(dev, sizeof(struct aml_aes_dev), GFP_KERNEL);
        if (aes_dd == NULL) {
                err = -ENOMEM;
                goto aes_dd_err;
@@ -1100,9 +1110,8 @@ static int aml_aes_probe(struct platform_device *pdev)
 
        match = of_match_device(aml_aes_dt_match, &pdev->dev);
        if (!match) {
-               pr_err("%s: cannot find match dt\n", __func__);
+               dev_err(dev, "%s: cannot find match dt\n", __func__);
                err = -EINVAL;
-               kfree(aes_dd);
                goto aes_dd_err;
        }
        aes_info = match->data;
@@ -1121,8 +1130,8 @@ static int aml_aes_probe(struct platform_device *pdev)
                        (unsigned long)aes_dd);
 
        crypto_init_queue(&aes_dd->queue, AML_AES_QUEUE_LENGTH);
-       err = request_irq(aes_dd->irq, aml_aes_irq, IRQF_SHARED, "aml-aes",
-                       aes_dd);
+       err = devm_request_irq(dev, aes_dd->irq, aml_aes_irq, IRQF_SHARED,
+                       "aml-aes", aes_dd);
        if (err) {
                dev_err(dev, "unable to request aes irq.\n");
                goto aes_irq_err;
@@ -1154,13 +1163,9 @@ err_algs:
        spin_unlock(&aml_aes.lock);
        aml_aes_buff_cleanup(aes_dd);
 err_aes_buff:
-       free_irq(aes_dd->irq, aes_dd);
 aes_irq_err:
-
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
-       kfree(aes_dd);
-       aes_dd = NULL;
 aes_dd_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -1169,6 +1174,7 @@ aes_dd_err:
 
 static int aml_aes_remove(struct platform_device *pdev)
 {
+       struct device *dev = &pdev->dev;
        static struct aml_aes_dev *aes_dd;
        const struct of_device_id *match;
        const struct aml_aes_info *aes_info = NULL;
@@ -1178,7 +1184,7 @@ static int aml_aes_remove(struct platform_device *pdev)
                return -ENODEV;
        match = of_match_device(aml_aes_dt_match, &pdev->dev);
        if (!match) {
-               pr_err("%s: cannot find match dt\n", __func__);
+               dev_err(dev, "%s: cannot find match dt\n", __func__);
                return -EINVAL;
        }
        aes_info = match->data;
@@ -1191,12 +1197,6 @@ static int aml_aes_remove(struct platform_device *pdev)
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
 
-       if (aes_dd->irq > 0)
-               free_irq(aes_dd->irq, aes_dd);
-
-       kfree(aes_dd);
-       aes_dd = NULL;
-
        return 0;
 }
 
index 0412b75..03b3b05 100644 (file)
@@ -37,8 +37,6 @@
 #include <crypto/scatterwalk.h>
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
-#include <linux/amlogic/iomap.h>
-#include <linux/amlogic/cpu_version.h>
 #include "aml-crypto-dma.h"
 
 u32 swap_ulong32(u32 val)
index 48e0a4a..12ab5d9 100644 (file)
@@ -19,8 +19,6 @@
 #define _AML_CRYPTO_H_
 #include <linux/io.h>
 
-/* #define CRYPTO_DEBUG */
-
  /* Reserved 4096 bytes and table is 12 bytes each */
 #define MAX_NUM_TABLES 341
 
@@ -117,6 +115,7 @@ struct dma_dsc {
 #define DMA_FLAG_AES_IN_USE    BIT(2)
 #define DMA_FLAG_SHA_IN_USE    BIT(3)
 
+#define DMA_KEY_IV_BUF_SIZE (48)
 struct aml_dma_dev {
        spinlock_t dma_lock;
        uint32_t thread;
@@ -136,17 +135,13 @@ u32 get_dma_sts0_offset(void);
 
 extern void __iomem *cryptoreg;
 
-extern int debug;
-#ifndef CRYPTO_DEBUG
-#define dbgp(level, fmt, arg...)
-#else
+extern u32 debug;
 #define dbgp(level, fmt, arg...)                 \
        do {                                            \
-               if (likely(debug >= level))                         \
+               if (likely(debug > level))                         \
                        pr_debug("%s: " fmt, __func__, ## arg);\
                else                                            \
                        pr_info("%s: " fmt, __func__, ## arg); \
        } while (0)
 
 #endif
-#endif
index 9e74d0f..4637164 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/of_platform.h>
 #include "aml-crypto-dma.h"
 
-int debug = 2;
-#ifdef CRYPTO_DEBUG
-module_param(debug, int, 0644);
-#endif
+static struct dentry *aml_dma_debug_dent;
+u32 debug = 3;
 
 void __iomem *cryptoreg;
 
@@ -78,6 +77,27 @@ MODULE_DEVICE_TABLE(of, aml_dma_dt_match);
 #define aml_aes_dt_match NULL
 #endif
 
+static int aml_dma_init_dbgfs(struct device *dev)
+{
+       struct dentry *file = NULL;
+
+       if (!aml_dma_debug_dent) {
+               aml_dma_debug_dent = debugfs_create_dir("aml_dma", NULL);
+               if (!aml_dma_debug_dent) {
+                       dev_err(dev, "can not create debugfs directory\n");
+                       return -ENOMEM;
+               }
+               file = debugfs_create_u32("debug", 0644,
+                               aml_dma_debug_dent, &debug);
+               if (!file) {
+                       dev_err(dev, "can not create entry in debugfs directory\n");
+                       return -ENOMEM;
+               }
+       }
+       return 0;
+}
+
+
 static int aml_dma_probe(struct platform_device *pdev)
 {
        struct aml_dma_dev *dma_dd;
@@ -89,7 +109,7 @@ static int aml_dma_probe(struct platform_device *pdev)
        int err = -EPERM;
        const struct meson_dma_data *priv_data;
 
-       dma_dd = kzalloc(sizeof(struct aml_dma_dev), GFP_KERNEL);
+       dma_dd = devm_kzalloc(dev, sizeof(struct aml_dma_dev), GFP_KERNEL);
        if (dma_dd == NULL) {
                err = -ENOMEM;
                goto dma_err;
@@ -106,8 +126,7 @@ static int aml_dma_probe(struct platform_device *pdev)
                dev_err(dev, "error to get normal IORESOURCE_MEM.\n");
                goto dma_err;
        } else {
-               cryptoreg = ioremap(res_base->start,
-                               resource_size(res_base));
+               cryptoreg = devm_ioremap_resource(dev, res_base);
                if (!cryptoreg) {
                        dev_err(dev, "failed to remap crypto reg\n");
                        goto dma_err;
@@ -118,17 +137,21 @@ static int aml_dma_probe(struct platform_device *pdev)
        dma_dd->irq = res_irq->start;
        dma_dd->dma_busy = 0;
        platform_set_drvdata(pdev, dma_dd);
+
+       err = aml_dma_init_dbgfs(dev);
+       if (err)
+               goto dma_err;
+
        dev_info(dev, "Aml dma\n");
 
        err = of_platform_populate(np, NULL, NULL, dev);
-
        if (err != 0)
-               iounmap(cryptoreg);
+               goto dma_err;
 
        return err;
 
 dma_err:
-       kfree(dma_dd);
+       debugfs_remove_recursive(aml_dma_debug_dent);
        dev_err(dev, "initialization failed.\n");
 
        return err;
@@ -142,8 +165,7 @@ static int aml_dma_remove(struct platform_device *pdev)
        if (!dma_dd)
                return -ENODEV;
 
-       iounmap(cryptoreg);
-       kfree(dma_dd);
+       debugfs_remove_recursive(aml_dma_debug_dent);
 
        return 0;
 }
index fde19f1..3a39ef2 100644 (file)
@@ -40,7 +40,6 @@
 #include <crypto/sha.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
-#include <linux/amlogic/iomap.h>
 #include "aml-crypto-dma.h"
 
 /* SHA flags */
@@ -459,9 +458,7 @@ static int aml_sha_update_dma_stop(struct aml_sha_dev *dd)
 static int aml_sha_update_req(struct aml_sha_dev *dd, struct ahash_request *req)
 {
        int err;
-#ifdef CRYPTO_DEBUG
        struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
-#endif
 
        dbgp(1, "update_req: ctx: %p, total: %u, digcnt: 0x%llx 0x%llx\n",
                ctx, ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
@@ -474,9 +471,7 @@ static int aml_sha_update_req(struct aml_sha_dev *dd, struct ahash_request *req)
 static int aml_sha_final_req(struct aml_sha_dev *dd, struct ahash_request *req)
 {
        int err = 0;
-#ifdef CRYPTO_DEBUG
        struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
-#endif
 
        err = aml_sha_update_dma_slow(dd, req);
 
@@ -1092,7 +1087,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "sha1",
                                .cra_driver_name  = "aml-sha1",
-                               .cra_priority     = 200,
+                               .cra_priority     = 150,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA1_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1118,7 +1113,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "sha256",
                                .cra_driver_name  = "aml-sha256",
-                               .cra_priority     = 200,
+                               .cra_priority     = 150,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA256_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1144,7 +1139,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "sha224",
                                .cra_driver_name  = "aml-sha224",
-                               .cra_priority     = 200,
+                               .cra_priority     = 150,
                                .cra_flags        = CRYPTO_ALG_ASYNC,
                                .cra_blocksize    = SHA224_BLOCK_SIZE,
                                .cra_ctxsize      = sizeof(struct aml_sha_ctx),
@@ -1171,7 +1166,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "hmac(sha1)",
                                .cra_driver_name  = "aml-hmac-sha1",
-                               .cra_priority     = 200,
+                               .cra_priority     = 150,
                                .cra_flags        = CRYPTO_ALG_ASYNC |
                                        CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize    = SHA1_BLOCK_SIZE,
@@ -1199,7 +1194,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "hmac(sha224)",
                                .cra_driver_name  = "aml-hmac-sha224",
-                               .cra_priority     = 200,
+                               .cra_priority     = 150,
                                .cra_flags        = CRYPTO_ALG_ASYNC |
                                        CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize    = SHA224_BLOCK_SIZE,
@@ -1227,7 +1222,7 @@ static struct ahash_alg sha_algs[] = {
                        .base   = {
                                .cra_name         = "hmac(sha256)",
                                .cra_driver_name  = "aml-hmac-sha256",
-                               .cra_priority     = 200,
+                               .cra_priority     = 150,
                                .cra_flags        = CRYPTO_ALG_ASYNC |
                                        CRYPTO_ALG_NEED_FALLBACK,
                                .cra_blocksize    = SHA256_BLOCK_SIZE,
@@ -1285,7 +1280,7 @@ static irqreturn_t aml_sha_irq(int irq, void *dev_id)
 
        if (status) {
                if (status == 0x1)
-                       pr_err("irq overwrite\n");
+                       dev_err(sha_dd->dev, "irq overwrite\n");
                if (sha_dd->dma->dma_busy == DMA_FLAG_MAY_OCCUPY)
                        return IRQ_HANDLED;
                if (sha_dd->flags & SHA_FLAGS_DMA_ACTIVE &&
@@ -1335,7 +1330,7 @@ static int aml_sha_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        int err = -EPERM;
 
-       sha_dd = kzalloc(sizeof(struct aml_sha_dev), GFP_KERNEL);
+       sha_dd = devm_kzalloc(dev, sizeof(struct aml_sha_dev), GFP_KERNEL);
        if (sha_dd == NULL) {
                err = -ENOMEM;
                goto sha_dd_err;
@@ -1355,8 +1350,8 @@ static int aml_sha_probe(struct platform_device *pdev)
                                        (unsigned long)sha_dd);
 
        crypto_init_queue(&sha_dd->queue, AML_SHA_QUEUE_LENGTH);
-       err = request_irq(sha_dd->irq, aml_sha_irq, IRQF_SHARED, "aml-sha",
-                                               sha_dd);
+       err = devm_request_irq(dev, sha_dd->irq, aml_sha_irq, IRQF_SHARED,
+                       "aml-sha", sha_dd);
        if (err) {
                dev_err(dev, "unable to request sha irq.\n");
                goto res_err;
@@ -1364,7 +1359,6 @@ static int aml_sha_probe(struct platform_device *pdev)
 
        aml_sha_hw_init(sha_dd);
 
-
        spin_lock(&aml_sha.lock);
        list_add_tail(&sha_dd->list, &aml_sha.dev_list);
        spin_unlock(&aml_sha.lock);
@@ -1381,12 +1375,8 @@ err_algs:
        spin_lock(&aml_sha.lock);
        list_del(&sha_dd->list);
        spin_unlock(&aml_sha.lock);
-
-       free_irq(sha_dd->irq, sha_dd);
 res_err:
        tasklet_kill(&sha_dd->done_task);
-       kfree(sha_dd);
-       sha_dd = NULL;
 sha_dd_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -1408,12 +1398,6 @@ static int aml_sha_remove(struct platform_device *pdev)
 
        tasklet_kill(&sha_dd->done_task);
 
-       if (sha_dd->irq >= 0)
-               free_irq(sha_dd->irq, sha_dd);
-
-       kfree(sha_dd);
-       sha_dd = NULL;
-
        return 0;
 }
 
index 99f32ba..b2198cc 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/hw_random.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #include <linux/device.h>
@@ -39,7 +40,8 @@
 #include <crypto/des.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
-#include <linux/amlogic/iomap.h>
+//#include <linux/amlogic/iomap.h>
+#include <crypto/skcipher.h>
 #include "aml-crypto-dma.h"
 
 /* TDES flags */
@@ -54,6 +56,7 @@
 
 #define AML_TDES_QUEUE_LENGTH  50
 
+#define SUPPORT_FAST_DMA 0
 struct aml_tdes_dev;
 
 struct aml_tdes_ctx {
@@ -63,6 +66,8 @@ struct aml_tdes_ctx {
        u32     key[3*DES_KEY_SIZE / sizeof(u32)];
 
        u16     block_size;
+       struct crypto_skcipher  *fallback;
+       u16 same_key;
 };
 
 struct aml_tdes_reqctx {
@@ -115,6 +120,11 @@ struct aml_tdes_drv {
        spinlock_t              lock;
 };
 
+struct aml_tdes_info {
+       struct crypto_alg *algs;
+       uint32_t num_algs;
+};
+
 static struct aml_tdes_drv aml_tdes = {
        .dev_list = LIST_HEAD_INIT(aml_tdes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(aml_tdes.lock),
@@ -124,42 +134,43 @@ static int set_tdes_key_iv(struct aml_tdes_dev *dd,
                u32 *key, u32 keylen, u32 *iv)
 {
        struct dma_dsc *dsc = dd->descriptor;
-       uint32_t key_iv[12];
+       struct device *dev = dd->dev;
+       uint32_t *key_iv = kzalloc(DMA_KEY_IV_BUF_SIZE, GFP_ATOMIC);
        uint32_t *piv = key_iv + 8;
        uint32_t len = keylen;
-       uint32_t processed = 0;
        dma_addr_t dma_addr_key;
        uint32_t i = 0;
 
-       memset(key_iv, 0, sizeof(key_iv));
+       if (!key_iv) {
+               dev_err(dev, "error allocating key_iv buffer\n");
+               return -ENOMEM;
+       }
        memcpy(key_iv, key, keylen);
        if (iv) {
                memcpy(piv, iv, 8);
-               len = 48; /* full key storage */
        }
 
-       if (!len)
-               return -EPERM;
+       len = DMA_KEY_IV_BUF_SIZE; /* full key storage */
 
        dma_addr_key = dma_map_single(dd->dev, key_iv,
-                       sizeof(key_iv), DMA_TO_DEVICE);
+                       DMA_KEY_IV_BUF_SIZE, DMA_TO_DEVICE);
 
        if (dma_mapping_error(dd->dev, dma_addr_key)) {
-               dev_err(dd->dev, "error mapping dma_addr_key\n");
+               dev_err(dev, "error mapping dma_addr_key\n");
+               kfree(key_iv);
                return -EINVAL;
        }
 
        while (len > 0) {
-               processed = len > 16 ? 16 : len;
                dsc[i].src_addr = (uint32_t)dma_addr_key + i * 16;
                dsc[i].tgt_addr = i * 16;
                dsc[i].dsc_cfg.d32 = 0;
-               dsc[i].dsc_cfg.b.length = processed;
+               dsc[i].dsc_cfg.b.length = len > 16 ? 16 : len;
                dsc[i].dsc_cfg.b.mode = MODE_KEY;
                dsc[i].dsc_cfg.b.eoc = 0;
                dsc[i].dsc_cfg.b.owner = 1;
                i++;
-               len -= processed;
+               len -= 16;
        }
        dsc[i - 1].dsc_cfg.b.eoc = 1;
 
@@ -172,8 +183,9 @@ static int set_tdes_key_iv(struct aml_tdes_dev *dd,
                ;
        aml_write_crypto_reg(dd->status, 0xf);
        dma_unmap_single(dd->dev, dma_addr_key,
-                       sizeof(key_iv), DMA_TO_DEVICE);
+                       DMA_KEY_IV_BUF_SIZE, DMA_TO_DEVICE);
 
+       kfree(key_iv);
        return 0;
 }
 
@@ -209,11 +221,15 @@ static size_t aml_tdes_sg_copy(struct scatterlist **sg, size_t *offset,
        return off;
 }
 
+#if SUPPORT_FAST_DMA
 static size_t aml_tdes_sg_dma(struct aml_tdes_dev *dd, struct dma_dsc *dsc,
                uint32_t *nents, size_t total)
 {
+       struct device *dev = dd->dev;
        size_t count = 0;
        size_t process = 0;
+       size_t count_total = 0;
+       size_t count_sg = 0;
        uint32_t i = 0;
        int err = 0;
        struct scatterlist *in_sg = dd->in_sg;
@@ -221,44 +237,62 @@ static size_t aml_tdes_sg_dma(struct aml_tdes_dev *dd, struct dma_dsc *dsc,
        dma_addr_t addr_in, addr_out;
 
        while (total && in_sg && out_sg && (in_sg->length == out_sg->length)
+                       && IS_ALIGNED(in_sg->length, DES_BLOCK_SIZE)
                        && *nents < MAX_NUM_TABLES) {
                process = min_t(unsigned int, total, in_sg->length);
                count += process;
                *nents += 1;
+               if (process != in_sg->length)
+                       dd->out_offset = dd->in_offset = in_sg->length;
                total -= process;
                in_sg = sg_next(in_sg);
                out_sg = sg_next(out_sg);
        }
-       err = dma_map_sg(dd->dev, dd->in_sg, *nents, DMA_TO_DEVICE);
-       if (!err) {
-               dev_err(dd->dev, "dma_map_sg() error\n");
-               return 0;
-       }
+       if (dd->in_sg != dd->out_sg) {
+               err = dma_map_sg(dd->dev, dd->in_sg, *nents, DMA_TO_DEVICE);
+               if (!err) {
+                       dev_err(dev, "dma_map_sg() error\n");
+                       return 0;
+               }
 
-       err = dma_map_sg(dd->dev, dd->out_sg, *nents,
-                       DMA_FROM_DEVICE);
-       if (!err) {
-               dev_err(dd->dev, "dma_map_sg() error\n");
-               dma_unmap_sg(dd->dev, dd->in_sg, *nents,
-                               DMA_TO_DEVICE);
-               return 0;
+               err = dma_map_sg(dd->dev, dd->out_sg, *nents,
+                               DMA_FROM_DEVICE);
+               if (!err) {
+                       dev_err(dev, "dma_map_sg() error\n");
+                       dma_unmap_sg(dd->dev, dd->in_sg, *nents,
+                                       DMA_TO_DEVICE);
+                       return 0;
+               }
+       } else {
+               err = dma_map_sg(dd->dev, dd->in_sg, *nents,
+                               DMA_BIDIRECTIONAL);
+               if (!err) {
+                       dev_err(dev, "dma_map_sg() error\n");
+                       return 0;
+               }
+               dma_sync_sg_for_device(dd->dev, dd->in_sg,
+                               *nents, DMA_TO_DEVICE);
        }
 
        in_sg = dd->in_sg;
        out_sg = dd->out_sg;
+       count_total = count;
        for (i = 0; i < *nents; i++) {
+               count_sg = count_total > sg_dma_len(in_sg) ?
+                       sg_dma_len(in_sg) : count_total;
                addr_in = sg_dma_address(in_sg);
                addr_out = sg_dma_address(out_sg);
                dsc[i].src_addr = (uintptr_t)addr_in;
                dsc[i].tgt_addr = (uintptr_t)addr_out;
                dsc[i].dsc_cfg.d32 = 0;
-               dsc[i].dsc_cfg.b.length = sg_dma_len(in_sg);
+               dsc[i].dsc_cfg.b.length = count_sg;
                in_sg = sg_next(in_sg);
                out_sg = sg_next(out_sg);
+               count_total -= count_sg;
        }
        return count;
 }
-
+#endif
 static struct aml_tdes_dev *aml_tdes_find_dev(struct aml_tdes_ctx *ctx)
 {
        struct aml_tdes_dev *tdes_dd = NULL;
@@ -362,11 +396,14 @@ static int aml_tdes_crypt_dma_start(struct aml_tdes_dev *dd)
                dd->fast_nents = 0;
        }
 
+#if SUPPORT_FAST_DMA
        if (fast)  {
                count = aml_tdes_sg_dma(dd, dsc, &dd->fast_nents, dd->total);
                dd->flags |= TDES_FLAGS_FAST;
                nents = dd->fast_nents;
-       } else {
+       } else
+#endif
+       {
                /* slow dma */
                /* use cache buffers */
                count = aml_tdes_sg_copy(&dd->in_sg, &dd->in_offset,
@@ -472,6 +509,7 @@ static int aml_tdes_handle_queue(struct aml_tdes_dev *dd,
 
 static int aml_tdes_crypt_dma_stop(struct aml_tdes_dev *dd)
 {
+       struct device *dev = dd->dev;
        int err = -EINVAL;
        size_t count;
 
@@ -480,10 +518,17 @@ static int aml_tdes_crypt_dma_stop(struct aml_tdes_dev *dd)
                dma_sync_single_for_cpu(dd->dev, dd->dma_descript_tab,
                                PAGE_SIZE, DMA_FROM_DEVICE);
                if  (dd->flags & TDES_FLAGS_FAST) {
-                       dma_unmap_sg(dd->dev, dd->out_sg,
+                       if (dd->in_sg != dd->out_sg) {
+                               dma_unmap_sg(dd->dev, dd->out_sg,
                                        dd->fast_nents, DMA_FROM_DEVICE);
-                       dma_unmap_sg(dd->dev, dd->in_sg,
+                               dma_unmap_sg(dd->dev, dd->in_sg,
                                        dd->fast_nents, DMA_TO_DEVICE);
+                       } else {
+                               dma_sync_sg_for_cpu(dd->dev, dd->in_sg,
+                               dd->fast_nents, DMA_FROM_DEVICE);
+                               dma_unmap_sg(dd->dev, dd->in_sg,
+                                       dd->fast_nents, DMA_BIDIRECTIONAL);
+                       }
                } else {
                        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                                        dd->dma_size, DMA_FROM_DEVICE);
@@ -494,7 +539,8 @@ static int aml_tdes_crypt_dma_stop(struct aml_tdes_dev *dd)
                                        dd->dma_size, 1);
                        if (count != dd->dma_size) {
                                err = -EINVAL;
-                               pr_err("not all data converted: %zu\n", count);
+                               dev_err(dev, "not all data converted: %zu\n",
+                                               count);
                        }
                }
                dd->flags &= ~TDES_FLAGS_DMA;
@@ -506,6 +552,7 @@ static int aml_tdes_crypt_dma_stop(struct aml_tdes_dev *dd)
 
 static int aml_tdes_buff_init(struct aml_tdes_dev *dd)
 {
+       struct device *dev = dd->dev;
        int err = -ENOMEM;
 
        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
@@ -515,7 +562,7 @@ static int aml_tdes_buff_init(struct aml_tdes_dev *dd)
        dd->buflen &= ~(DES_BLOCK_SIZE - 1);
 
        if (!dd->buf_in || !dd->buf_out || !dd->descriptor) {
-               dev_err(dd->dev, "unable to alloc pages.\n");
+               dev_err(dev, "unable to alloc pages.\n");
                goto err_alloc;
        }
 
@@ -523,7 +570,7 @@ static int aml_tdes_buff_init(struct aml_tdes_dev *dd)
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                        dd->buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
+               dev_err(dev, "dma %zd bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }
@@ -531,7 +578,7 @@ static int aml_tdes_buff_init(struct aml_tdes_dev *dd)
        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                        dd->buflen, DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
-               dev_err(dd->dev, "dma %zd bytes error\n", dd->buflen);
+               dev_err(dev, "dma %zd bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }
@@ -540,7 +587,7 @@ static int aml_tdes_buff_init(struct aml_tdes_dev *dd)
                        PAGE_SIZE, DMA_TO_DEVICE);
 
        if (dma_mapping_error(dd->dev, dd->dma_descript_tab)) {
-               dev_err(dd->dev, "dma descriptor error\n");
+               dev_err(dev, "dma descriptor error\n");
                err = -EINVAL;
                goto err_map_descriptor;
        }
@@ -561,7 +608,7 @@ err_map_in:
        free_page((uintptr_t)dd->descriptor);
 err_alloc:
        if (err)
-               pr_err("error: %d\n", err);
+               dev_err(dev, "error: %d\n", err);
        return err;
 }
 
@@ -591,6 +638,31 @@ static int aml_tdes_crypt(struct ablkcipher_request *req, unsigned long mode)
        }
        ctx->block_size = DES_BLOCK_SIZE;
 
+       if (ctx->fallback && ctx->same_key) {
+               char *__subreq_desc = kzalloc(sizeof(struct skcipher_request) +
+                               crypto_skcipher_reqsize(ctx->fallback),
+                               GFP_ATOMIC);
+               struct skcipher_request *subreq = (void *)__subreq_desc;
+               int ret = 0;
+
+               if (!subreq)
+                       return -ENOMEM;
+
+               skcipher_request_set_tfm(subreq, ctx->fallback);
+               skcipher_request_set_callback(subreq, req->base.flags, NULL,
+                                             NULL);
+               skcipher_request_set_crypt(subreq, req->src, req->dst,
+                                          req->nbytes, req->info);
+
+               if (mode & TDES_FLAGS_ENCRYPT)
+                       ret = crypto_skcipher_encrypt(subreq);
+               else
+                       ret = crypto_skcipher_decrypt(subreq);
+
+               skcipher_request_free(subreq);
+               return ret;
+       }
+
        dd = aml_tdes_find_dev(ctx);
        if (!dd)
                return -ENODEV;
@@ -667,7 +739,10 @@ static int aml_tdes_cbc_decrypt(struct ablkcipher_request *req)
 
 static int aml_tdes_cra_init(struct crypto_tfm *tfm)
 {
+       struct aml_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+
        tfm->crt_ablkcipher.reqsize = sizeof(struct aml_tdes_reqctx);
+       ctx->fallback = NULL;
 
        return 0;
 }
@@ -676,7 +751,7 @@ static void aml_tdes_cra_exit(struct crypto_tfm *tfm)
 {
 }
 
-static struct crypto_alg des_algs[] = {
+static struct crypto_alg des_tdes_algs[] = {
        {
                .cra_name        = "ecb(des)",
                .cra_driver_name = "ecb-des-aml",
@@ -718,9 +793,6 @@ static struct crypto_alg des_algs[] = {
                        .decrypt        =    aml_tdes_cbc_decrypt,
                }
        },
-};
-
-static struct crypto_alg tdes_algs[] = {
        {
                .cra_name        = "ecb(des3_ede)",
                .cra_driver_name = "ecb-tdes-aml",
@@ -764,6 +836,114 @@ static struct crypto_alg tdes_algs[] = {
        }
 };
 
+static int aml_tdes_lite_cra_init(struct crypto_tfm *tfm)
+{
+       struct aml_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+       const char *alg_name = crypto_tfm_alg_name(tfm);
+       const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
+
+       tfm->crt_ablkcipher.reqsize = sizeof(struct aml_tdes_reqctx);
+
+       /* Allocate a fallback and abort if it failed. */
+       ctx->fallback = crypto_alloc_skcipher(alg_name, 0,
+                                           flags);
+       if (IS_ERR(ctx->fallback)) {
+               pr_err("aml-tdes: fallback '%s' could not be loaded.\n",
+                               alg_name);
+               return PTR_ERR(ctx->fallback);
+       }
+
+       return 0;
+}
+
+static void aml_tdes_lite_cra_exit(struct crypto_tfm *tfm)
+{
+       struct aml_tdes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (ctx->fallback)
+               crypto_free_skcipher(ctx->fallback);
+
+       ctx->fallback = NULL;
+}
+
+static int aml_tdes_lite_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+               unsigned int keylen)
+{
+       struct aml_tdes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+       int ret = 0;
+       u64 *tmp = NULL;
+
+       if ((keylen != 2 * DES_KEY_SIZE) && (keylen != 3 * DES_KEY_SIZE)) {
+               crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+
+       memcpy(ctx->key, key, keylen);
+       ctx->keylen = keylen;
+
+       tmp = (u64 *)ctx->key;
+       if (keylen == 2 * DES_KEY_SIZE)
+               ctx->same_key = !(tmp[0] ^ tmp[1]);
+       else
+               ctx->same_key = !((tmp[0] ^ tmp[1]) | (tmp[0] ^ tmp[2]));
+
+       if (ctx->same_key) {
+               crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+               crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+                               CRYPTO_TFM_REQ_MASK);
+               ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+       }
+
+       return ret;
+}
+
+
+static struct crypto_alg tdes_lite_algs[] = {
+       {
+               .cra_name        = "ecb(des3_ede)",
+               .cra_driver_name = "ecb-tdes-lite-aml",
+               .cra_priority   = 200,
+               .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                       CRYPTO_ALG_ASYNC |  CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize  = DES_BLOCK_SIZE,
+               .cra_ctxsize    = sizeof(struct aml_tdes_ctx),
+               .cra_alignmask  = 0,
+               .cra_type       = &crypto_ablkcipher_type,
+               .cra_module     = THIS_MODULE,
+               .cra_init       = aml_tdes_lite_cra_init,
+               .cra_exit       = aml_tdes_lite_cra_exit,
+               .cra_u.ablkcipher = {
+                       .min_keysize    =    2 * DES_KEY_SIZE,
+                       .max_keysize    =    3 * DES_KEY_SIZE,
+                       .setkey     =    aml_tdes_lite_setkey,
+                       .encrypt    =    aml_tdes_ecb_encrypt,
+                       .decrypt    =    aml_tdes_ecb_decrypt,
+               }
+       },
+       {
+               .cra_name        = "cbc(des3_ede)",
+               .cra_driver_name = "cbc-tdes-lite-aml",
+               .cra_priority  = 200,
+               .cra_flags     = CRYPTO_ALG_TYPE_ABLKCIPHER |
+                       CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize = DES_BLOCK_SIZE,
+               .cra_ctxsize   = sizeof(struct aml_tdes_ctx),
+               .cra_alignmask = 0,
+               .cra_type      = &crypto_ablkcipher_type,
+               .cra_module    = THIS_MODULE,
+               .cra_init      = aml_tdes_lite_cra_init,
+               .cra_exit      = aml_tdes_lite_cra_exit,
+               .cra_u.ablkcipher =       {
+                       .min_keysize = 2 * DES_KEY_SIZE,
+                       .max_keysize = 3 * DES_KEY_SIZE,
+                       .ivsize      = DES_BLOCK_SIZE,
+                       .setkey      = aml_tdes_lite_setkey,
+                       .encrypt     = aml_tdes_cbc_encrypt,
+                       .decrypt     = aml_tdes_cbc_decrypt,
+               }
+       }
+};
+
 static void aml_tdes_queue_task(unsigned long data)
 {
        struct aml_tdes_dev *dd = (struct aml_tdes_dev *)data;
@@ -774,6 +954,7 @@ static void aml_tdes_queue_task(unsigned long data)
 static void aml_tdes_done_task(unsigned long data)
 {
        struct aml_tdes_dev *dd = (struct aml_tdes_dev *) data;
+       struct device *dev = dd->dev;
        int err;
 
        err = aml_tdes_crypt_dma_stop(dd);
@@ -790,7 +971,7 @@ static void aml_tdes_done_task(unsigned long data)
                                dd->in_sg = sg_next(dd->in_sg);
                                dd->out_sg = sg_next(dd->out_sg);
                                if (!dd->in_sg || !dd->out_sg) {
-                                       pr_err("aml-tdes: sg invalid\n");
+                                       dev_err(dev, "aml-tdes: sg invalid\n");
                                        err = -EINVAL;
                                        break;
                                }
@@ -810,11 +991,12 @@ static void aml_tdes_done_task(unsigned long data)
 static irqreturn_t aml_tdes_irq(int irq, void *dev_id)
 {
        struct aml_tdes_dev *tdes_dd = dev_id;
+       struct device *dev = tdes_dd->dev;
        uint8_t status = aml_read_crypto_reg(tdes_dd->status);
 
        if (status) {
                if (status == 0x1)
-                       pr_err("irq overwrite\n");
+                       dev_err(dev, "irq overwrite\n");
                if (tdes_dd->dma->dma_busy == DMA_FLAG_MAY_OCCUPY)
                        return IRQ_HANDLED;
                if ((tdes_dd->dma->dma_busy & DMA_FLAG_TDES_IN_USE) &&
@@ -831,29 +1013,22 @@ static irqreturn_t aml_tdes_irq(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
-static void aml_tdes_unregister_algs(struct aml_tdes_dev *dd)
+static void aml_tdes_unregister_algs(struct aml_tdes_dev *dd,
+               const struct aml_tdes_info *tdes_info)
 {
-       int i = 0;
-
-       for (; i < ARRAY_SIZE(des_algs); i++)
-               crypto_unregister_alg(&des_algs[i]);
+       int i;
 
-       for (; i < ARRAY_SIZE(tdes_algs); i++)
-               crypto_unregister_alg(&tdes_algs[i]);
+       for (i = 0; i < tdes_info->num_algs; i++)
+               crypto_unregister_alg(&(tdes_info->algs[i]));
 }
 
-static int aml_tdes_register_algs(struct aml_tdes_dev *dd)
+static int aml_tdes_register_algs(struct aml_tdes_dev *dd,
+               const struct aml_tdes_info *tdes_info)
 {
-       int err = 0, i = 0, j = 0, k = 0;
+       int err, i, j;
 
-       for (; i < ARRAY_SIZE(des_algs); i++) {
-               err = crypto_register_alg(&des_algs[i]);
-               if (err)
-                       goto err_des_algs;
-       }
-
-       for (; k < ARRAY_SIZE(tdes_algs); k++) {
-               err = crypto_register_alg(&tdes_algs[k]);
+       for (i = 0; i < tdes_info->num_algs; i++) {
+               err = crypto_register_alg(&(tdes_info->algs[i]));
                if (err)
                        goto err_tdes_algs;
        }
@@ -861,28 +1036,58 @@ static int aml_tdes_register_algs(struct aml_tdes_dev *dd)
        return 0;
 
 err_tdes_algs:
-       for (j = 0; j < k; j++)
-               crypto_unregister_alg(&tdes_algs[j]);
-
-err_des_algs:
        for (j = 0; j < i; j++)
-               crypto_unregister_alg(&des_algs[j]);
+               crypto_unregister_alg(&(tdes_info->algs[j]));
 
        return err;
 }
 
+struct aml_tdes_info aml_des_tdes = {
+       .algs = des_tdes_algs,
+       .num_algs = ARRAY_SIZE(des_tdes_algs),
+};
+
+struct aml_tdes_info aml_tdes_lite = {
+       .algs = tdes_lite_algs,
+       .num_algs = ARRAY_SIZE(tdes_lite_algs),
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id aml_tdes_dt_match[] = {
+       {       .compatible = "amlogic,des_dma,tdes_dma",
+               .data = &aml_des_tdes,
+       },
+       {       .compatible = "amlogic,tdes_dma",
+               .data = &aml_tdes_lite,
+       },
+       {},
+};
+#else
+#define aml_tdes_dt_match NULL
+#endif
+
 static int aml_tdes_probe(struct platform_device *pdev)
 {
        struct aml_tdes_dev *tdes_dd;
        struct device *dev = &pdev->dev;
        int err = -EPERM;
+       const struct of_device_id *match;
+       const struct aml_tdes_info *tdes_info = NULL;
 
-       tdes_dd = kzalloc(sizeof(struct aml_tdes_dev), GFP_KERNEL);
+       tdes_dd = devm_kzalloc(dev, sizeof(struct aml_tdes_dev), GFP_KERNEL);
        if (tdes_dd == NULL) {
                err = -ENOMEM;
                goto tdes_dd_err;
        }
 
+       match = of_match_device(aml_tdes_dt_match, &pdev->dev);
+       if (!match) {
+               dev_err(dev, "%s: cannot find match dt\n", __func__);
+               err = -EINVAL;
+               goto tdes_dd_err;
+       }
+
+       tdes_info = match->data;
        tdes_dd->dev = dev;
        tdes_dd->dma = dev_get_drvdata(dev->parent);
        tdes_dd->thread = tdes_dd->dma->thread;
@@ -899,8 +1104,8 @@ static int aml_tdes_probe(struct platform_device *pdev)
                        (unsigned long)tdes_dd);
 
        crypto_init_queue(&tdes_dd->queue, AML_TDES_QUEUE_LENGTH);
-       err = request_irq(tdes_dd->irq, aml_tdes_irq, IRQF_SHARED, "aml-tdes",
-                       tdes_dd);
+       err = devm_request_irq(dev, tdes_dd->irq, aml_tdes_irq, IRQF_SHARED,
+                       "aml-tdes", tdes_dd);
        if (err) {
                dev_err(dev, "unable to request tdes irq.\n");
                goto tdes_irq_err;
@@ -918,7 +1123,7 @@ static int aml_tdes_probe(struct platform_device *pdev)
        list_add_tail(&tdes_dd->list, &aml_tdes.dev_list);
        spin_unlock(&aml_tdes.lock);
 
-       err = aml_tdes_register_algs(tdes_dd);
+       err = aml_tdes_register_algs(tdes_dd, tdes_info);
        if (err)
                goto err_algs;
 
@@ -932,13 +1137,9 @@ err_algs:
        spin_unlock(&aml_tdes.lock);
        aml_tdes_buff_cleanup(tdes_dd);
 err_tdes_buff:
-       free_irq(tdes_dd->irq, tdes_dd);
 tdes_irq_err:
-
        tasklet_kill(&tdes_dd->done_task);
        tasklet_kill(&tdes_dd->queue_task);
-       kfree(tdes_dd);
-       tdes_dd = NULL;
 tdes_dd_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -948,38 +1149,33 @@ tdes_dd_err:
 static int aml_tdes_remove(struct platform_device *pdev)
 {
        static struct aml_tdes_dev *tdes_dd;
+       struct device *dev = &pdev->dev;
+       const struct of_device_id *match;
+       const struct aml_tdes_info *tdes_info = NULL;
 
        tdes_dd = platform_get_drvdata(pdev);
        if (!tdes_dd)
                return -ENODEV;
+
+       match = of_match_device(aml_tdes_dt_match, &pdev->dev);
+       if (!match) {
+               dev_err(dev, "%s: cannot find match dt\n", __func__);
+               return -EINVAL;
+       }
+
+       tdes_info = match->data;
        spin_lock(&aml_tdes.lock);
        list_del(&tdes_dd->list);
        spin_unlock(&aml_tdes.lock);
 
-       aml_tdes_unregister_algs(tdes_dd);
+       aml_tdes_unregister_algs(tdes_dd, tdes_info);
 
        tasklet_kill(&tdes_dd->done_task);
        tasklet_kill(&tdes_dd->queue_task);
 
-       if (tdes_dd->irq > 0)
-               free_irq(tdes_dd->irq, tdes_dd);
-
-       kfree(tdes_dd);
-       tdes_dd = NULL;
-
        return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id aml_tdes_dt_match[] = {
-       {       .compatible = "amlogic,des_dma,tdes_dma",
-       },
-       {},
-};
-#else
-#define aml_tdes_dt_match NULL
-#endif
-
 static struct platform_driver aml_tdes_driver = {
        .probe          = aml_tdes_probe,
        .remove         = aml_tdes_remove,