crypto: Use arm64 crypto for android net cts tests
authorMatthew Shyu <matthew.shyu@amlogic.com>
Fri, 29 Jun 2018 10:21:06 +0000 (18:21 +0800)
committerYixun Lan <yixun.lan@amlogic.com>
Thu, 5 Jul 2018 13:42:46 +0000 (06:42 -0700)
PD#169014:

1. Fix a race condition in the Aml HW crypto driver.
A flag is added in the irq handler to distinguish
the source of the interrupt, since all crypto modules
share the same DMA thread.

2. Enable arm64 crypto.
The arm64 crypto extensions are synchronous and are
therefore able to pass the net CTS tests for UDP.

Change-Id: I2fe4f44a0b386e1e5d234d0ba07d17ffdc3224a7
Signed-off-by: Matthew Shyu <matthew.shyu@amlogic.com>
arch/arm64/configs/meson64_defconfig
drivers/amlogic/crypto/aml-aes-dma.c
drivers/amlogic/crypto/aml-crypto-dma.h
drivers/amlogic/crypto/aml-dma.c
drivers/amlogic/crypto/aml-sha-dma.c
drivers/amlogic/crypto/aml-tdes-dma.c

index c0c0fe4..017b983 100644 (file)
@@ -595,7 +595,6 @@ CONFIG_LSM_MMAP_MIN_ADDR=0
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SMACK=y
-CONFIG_CRYPTO_CRYPTD=y
 CONFIG_CRYPTO_MICHAEL_MIC=y
 CONFIG_CRYPTO_SHA512=y
 CONFIG_CRYPTO_TWOFISH=y
@@ -605,5 +604,10 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
 CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
 CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC7=y
index 1239089..c265790 100644 (file)
@@ -319,7 +319,7 @@ static void aml_aes_finish_req(struct aml_aes_dev *dd, int32_t err)
 
        dd->flags &= ~AES_FLAGS_BUSY;
        spin_lock_irqsave(&dd->dma->dma_lock, flags);
-       dd->dma->dma_busy = 0;
+       dd->dma->dma_busy &= ~DMA_FLAG_MAY_OCCUPY;
        spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
        req->base.complete(&req->base, err);
 }
@@ -330,8 +330,7 @@ static int aml_aes_crypt_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
 {
        uint32_t op_mode = OP_MODE_ECB;
        uint32_t i = 0;
-
-       dd->flags |= AES_FLAGS_DMA;
+       unsigned long flags;
 
        if (dd->flags & AES_FLAGS_CBC)
                op_mode = OP_MODE_CBC;
@@ -355,6 +354,11 @@ static int aml_aes_crypt_dma(struct aml_aes_dev *dd, struct dma_dsc *dsc,
        aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
 
        /* Start DMA transfer */
+       spin_lock_irqsave(&dd->dma->dma_lock, flags);
+       dd->dma->dma_busy |= DMA_FLAG_AES_IN_USE;
+       spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
+
+       dd->flags |= AES_FLAGS_DMA;
        aml_write_crypto_reg(dd->thread, dd->dma_descript_tab | 2);
        return -EINPROGRESS;
 }
@@ -431,7 +435,7 @@ static int aml_aes_write_ctrl(struct aml_aes_dev *dd)
        if (dd->flags & AES_FLAGS_CBC)
                err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
                                dd->req->info, 0);
-       else if  (dd->flags & AES_FLAGS_CTR)
+       else if (dd->flags & AES_FLAGS_CTR)
                err = set_aes_key_iv(dd, dd->ctx->key, dd->ctx->keylen,
                                dd->req->info, 1);
        else
@@ -454,7 +458,7 @@ static int aml_aes_handle_queue(struct aml_aes_dev *dd,
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
 
-       if (dd->flags & AES_FLAGS_BUSY || dd->dma->dma_busy) {
+       if ((dd->flags & AES_FLAGS_BUSY) || dd->dma->dma_busy) {
                spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
                return ret;
        }
@@ -462,7 +466,7 @@ static int aml_aes_handle_queue(struct aml_aes_dev *dd,
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req) {
                dd->flags |= AES_FLAGS_BUSY;
-               dd->dma->dma_busy = 1;
+               dd->dma->dma_busy |= DMA_FLAG_MAY_OCCUPY;
        }
        spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
 
@@ -1011,8 +1015,12 @@ static irqreturn_t aml_aes_irq(int irq, void *dev_id)
        if (status) {
                if (status == 0x1)
                        pr_err("irq overwrite\n");
-               if (AES_FLAGS_DMA & aes_dd->flags) {
+               if (aes_dd->dma->dma_busy == DMA_FLAG_MAY_OCCUPY)
+                       return IRQ_HANDLED;
+               if ((aes_dd->flags & AES_FLAGS_DMA) &&
+                               (aes_dd->dma->dma_busy & DMA_FLAG_AES_IN_USE)) {
                        aml_write_crypto_reg(aes_dd->status, 0xf);
+                       aes_dd->dma->dma_busy &= ~DMA_FLAG_AES_IN_USE;
                        tasklet_schedule(&aes_dd->done_task);
                        return IRQ_HANDLED;
                } else {
index d080e66..48e0a4a 100644 (file)
@@ -19,6 +19,8 @@
 #define _AML_CRYPTO_H_
 #include <linux/io.h>
 
+/* #define CRYPTO_DEBUG */
+
  /* Reserved 4096 bytes and table is 12 bytes each */
 #define MAX_NUM_TABLES 341
 
@@ -110,6 +112,11 @@ struct dma_dsc {
        uint32_t tgt_addr;
 };
 
+#define DMA_FLAG_MAY_OCCUPY    BIT(0)
+#define DMA_FLAG_TDES_IN_USE   BIT(1)
+#define DMA_FLAG_AES_IN_USE    BIT(2)
+#define DMA_FLAG_SHA_IN_USE    BIT(3)
+
 struct aml_dma_dev {
        spinlock_t dma_lock;
        uint32_t thread;
@@ -130,12 +137,16 @@ u32 get_dma_sts0_offset(void);
 extern void __iomem *cryptoreg;
 
 extern int debug;
+#ifndef CRYPTO_DEBUG
+#define dbgp(level, fmt, arg...)
+#else
 #define dbgp(level, fmt, arg...)                 \
        do {                                            \
-               if (debug >= level)                         \
+               if (likely(debug >= level))                         \
                        pr_debug("%s: " fmt, __func__, ## arg);\
                else                                            \
                        pr_info("%s: " fmt, __func__, ## arg); \
        } while (0)
 
 #endif
+#endif
index f50417b..12f5b3e 100644 (file)
@@ -43,7 +43,9 @@
 #include "aml-crypto-dma.h"
 
 int debug = 2;
-/*module_param(debug, int, 0644);*/
+#ifdef CRYPTO_DEBUG
+module_param(debug, int, 0644);
+#endif
 
 void __iomem *cryptoreg;
 
index ba06337..fde19f1 100644 (file)
@@ -229,6 +229,7 @@ static int aml_sha_xmit_dma(struct aml_sha_dev *dd,
        struct aml_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
        struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
        size_t length = 0;
+       unsigned long flags;
 
        dbgp(1, "xmit_dma:ctx:%p,digcnt:0x%llx 0x%llx,nents: %u,final:%d\n",
                ctx, ctx->digcnt[1], ctx->digcnt[0], nents, final);
@@ -286,6 +287,10 @@ static int aml_sha_xmit_dma(struct aml_sha_dev *dd,
                ctx, ctx->digcnt[1], ctx->digcnt[0], length, final);
 
        /* Start DMA transfer */
+       spin_lock_irqsave(&dd->dma->dma_lock, flags);
+       dd->dma->dma_busy |= DMA_FLAG_SHA_IN_USE;
+       spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
+
        dd->flags |=  SHA_FLAGS_DMA_ACTIVE;
        aml_write_crypto_reg(dd->thread,
                        (uintptr_t) ctx->dma_descript_tab | 2);
@@ -454,7 +459,9 @@ static int aml_sha_update_dma_stop(struct aml_sha_dev *dd)
 static int aml_sha_update_req(struct aml_sha_dev *dd, struct ahash_request *req)
 {
        int err;
+#ifdef CRYPTO_DEBUG
        struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
+#endif
 
        dbgp(1, "update_req: ctx: %p, total: %u, digcnt: 0x%llx 0x%llx\n",
                ctx, ctx->total, ctx->digcnt[1], ctx->digcnt[0]);
@@ -467,7 +474,9 @@ static int aml_sha_update_req(struct aml_sha_dev *dd, struct ahash_request *req)
 static int aml_sha_final_req(struct aml_sha_dev *dd, struct ahash_request *req)
 {
        int err = 0;
+#ifdef CRYPTO_DEBUG
        struct aml_sha_reqctx *ctx = ahash_request_ctx(req);
+#endif
 
        err = aml_sha_update_dma_slow(dd, req);
 
@@ -624,7 +633,7 @@ static void aml_sha_finish_req(struct ahash_request *req, int err)
        }
 
        spin_lock_irqsave(&dd->dma->dma_lock, flags);
-       dd->dma->dma_busy = 0;
+       dd->dma->dma_busy &= ~DMA_FLAG_MAY_OCCUPY;
        dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_OUTPUT_READY);
        spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
 
@@ -806,7 +815,7 @@ static int aml_sha_handle_queue(struct aml_sha_dev *dd,
        if (req)
                ret = ahash_enqueue_request(&dd->queue, req);
 
-       if (SHA_FLAGS_BUSY & dd->flags || dd->dma->dma_busy) {
+       if ((dd->flags & SHA_FLAGS_BUSY) || dd->dma->dma_busy) {
                spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
                return ret;
        }
@@ -815,7 +824,7 @@ static int aml_sha_handle_queue(struct aml_sha_dev *dd,
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req) {
                dd->flags |= SHA_FLAGS_BUSY;
-               dd->dma->dma_busy = 1;
+               dd->dma->dma_busy |= DMA_FLAG_MAY_OCCUPY;
        }
        spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
 
@@ -1277,15 +1286,20 @@ static irqreturn_t aml_sha_irq(int irq, void *dev_id)
        if (status) {
                if (status == 0x1)
                        pr_err("irq overwrite\n");
-               if (SHA_FLAGS_DMA_ACTIVE & sha_dd->flags) {
+               if (sha_dd->dma->dma_busy == DMA_FLAG_MAY_OCCUPY)
+                       return IRQ_HANDLED;
+               if (sha_dd->flags & SHA_FLAGS_DMA_ACTIVE &&
+                               (sha_dd->dma->dma_busy & DMA_FLAG_SHA_IN_USE)) {
                        sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
                        aml_write_crypto_reg(sha_dd->status, 0xf);
+                       sha_dd->dma->dma_busy &= ~DMA_FLAG_SHA_IN_USE;
                        tasklet_schedule(&sha_dd->done_task);
                        return IRQ_HANDLED;
                } else {
                        return IRQ_NONE;
                }
        }
+
        return IRQ_NONE;
 }
 
index 8927412..99f32ba 100644 (file)
@@ -213,6 +213,7 @@ static size_t aml_tdes_sg_dma(struct aml_tdes_dev *dd, struct dma_dsc *dsc,
                uint32_t *nents, size_t total)
 {
        size_t count = 0;
+       size_t process = 0;
        uint32_t i = 0;
        int err = 0;
        struct scatterlist *in_sg = dd->in_sg;
@@ -221,9 +222,10 @@ static size_t aml_tdes_sg_dma(struct aml_tdes_dev *dd, struct dma_dsc *dsc,
 
        while (total && in_sg && out_sg && (in_sg->length == out_sg->length)
                        && *nents < MAX_NUM_TABLES) {
-               count += min_t(unsigned int, total, in_sg->length);
+               process = min_t(unsigned int, total, in_sg->length);
+               count += process;
                *nents += 1;
-               total -= count;
+               total -= process;
                in_sg = sg_next(in_sg);
                out_sg = sg_next(out_sg);
        }
@@ -291,9 +293,12 @@ static int aml_tdes_hw_init(struct aml_tdes_dev *dd)
 static void aml_tdes_finish_req(struct aml_tdes_dev *dd, int err)
 {
        struct ablkcipher_request *req = dd->req;
+       unsigned long flags;
 
+       spin_lock_irqsave(&dd->dma->dma_lock, flags);
        dd->flags &= ~TDES_FLAGS_BUSY;
-       dd->dma->dma_busy = 0;
+       dd->dma->dma_busy &= ~DMA_FLAG_MAY_OCCUPY;
+       spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
        req->base.complete(&req->base, err);
 }
 
@@ -302,8 +307,8 @@ static int aml_tdes_crypt_dma(struct aml_tdes_dev *dd, struct dma_dsc *dsc,
 {
        uint32_t op_mode = OP_MODE_ECB;
        uint32_t i = 0;
+       unsigned long flags;
 
-       dd->flags |= TDES_FLAGS_DMA;
 
        if (dd->flags & TDES_FLAGS_CBC)
                op_mode = OP_MODE_CBC;
@@ -325,6 +330,11 @@ static int aml_tdes_crypt_dma(struct aml_tdes_dev *dd, struct dma_dsc *dsc,
        aml_dma_debug(dsc, nents, __func__, dd->thread, dd->status);
 
        /* Start DMA transfer */
+       spin_lock_irqsave(&dd->dma->dma_lock, flags);
+       dd->dma->dma_busy |= DMA_FLAG_TDES_IN_USE;
+       spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
+
+       dd->flags |= TDES_FLAGS_DMA;
        aml_write_crypto_reg(dd->thread,
                        (uintptr_t) dd->dma_descript_tab | 2);
        return -EINPROGRESS;
@@ -412,7 +422,7 @@ static int aml_tdes_handle_queue(struct aml_tdes_dev *dd,
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
 
-       if (dd->flags & TDES_FLAGS_BUSY || dd->dma->dma_busy) {
+       if ((dd->flags & TDES_FLAGS_BUSY) || dd->dma->dma_busy) {
                spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
                return ret;
        }
@@ -420,7 +430,7 @@ static int aml_tdes_handle_queue(struct aml_tdes_dev *dd,
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req) {
                dd->flags |= TDES_FLAGS_BUSY;
-               dd->dma->dma_busy = 1;
+               dd->dma->dma_busy |= DMA_FLAG_MAY_OCCUPY;
        }
        spin_unlock_irqrestore(&dd->dma->dma_lock, flags);
 
@@ -670,7 +680,7 @@ static struct crypto_alg des_algs[] = {
        {
                .cra_name        = "ecb(des)",
                .cra_driver_name = "ecb-des-aml",
-               .cra_priority  =  100,
+               .cra_priority  =  200,
                .cra_flags     =  CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize =  DES_BLOCK_SIZE,
                .cra_ctxsize   =  sizeof(struct aml_tdes_ctx),
@@ -690,7 +700,7 @@ static struct crypto_alg des_algs[] = {
        {
                .cra_name        =  "cbc(des)",
                .cra_driver_name =  "cbc-des-aml",
-               .cra_priority  =  100,
+               .cra_priority  =  200,
                .cra_flags     =  CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize =  DES_BLOCK_SIZE,
                .cra_ctxsize   =  sizeof(struct aml_tdes_ctx),
@@ -714,7 +724,7 @@ static struct crypto_alg tdes_algs[] = {
        {
                .cra_name        = "ecb(des3_ede)",
                .cra_driver_name = "ecb-tdes-aml",
-               .cra_priority   = 100,
+               .cra_priority   = 200,
                .cra_flags      = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize  = DES_BLOCK_SIZE,
                .cra_ctxsize    = sizeof(struct aml_tdes_ctx),
@@ -734,7 +744,7 @@ static struct crypto_alg tdes_algs[] = {
        {
                .cra_name        = "cbc(des3_ede)",
                .cra_driver_name = "cbc-tdes-aml",
-               .cra_priority  = 100,
+               .cra_priority  = 200,
                .cra_flags     = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = DES_BLOCK_SIZE,
                .cra_ctxsize   = sizeof(struct aml_tdes_ctx),
@@ -805,8 +815,12 @@ static irqreturn_t aml_tdes_irq(int irq, void *dev_id)
        if (status) {
                if (status == 0x1)
                        pr_err("irq overwrite\n");
-               if (TDES_FLAGS_DMA & tdes_dd->flags) {
+               if (tdes_dd->dma->dma_busy == DMA_FLAG_MAY_OCCUPY)
+                       return IRQ_HANDLED;
+               if ((tdes_dd->dma->dma_busy & DMA_FLAG_TDES_IN_USE) &&
+                               (tdes_dd->flags & TDES_FLAGS_DMA)) {
                        aml_write_crypto_reg(tdes_dd->status, 0xf);
+                       tdes_dd->dma->dma_busy &= ~DMA_FLAG_TDES_IN_USE;
                        tasklet_schedule(&tdes_dd->done_task);
                        return IRQ_HANDLED;
                } else {