crypto: rockchip - add fallback for cipher
author Corentin Labbe <clabbe@baylibre.com>
Tue, 27 Sep 2022 07:54:44 +0000 (07:54 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 31 Dec 2022 12:14:22 +0000 (13:14 +0100)
[ Upstream commit 68ef8af09a1a912a5ed2cfaa4cca7606f52cef90 ]

The hardware does not handle zero-length requests, so let's add a
fallback for them.
The fallback will also be used for any unaligned case the hardware
cannot handle.

Fixes: ce0183cb6464b ("crypto: rockchip - switch to skcipher API")
Reviewed-by: John Keeping <john@metanate.com>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/crypto/Kconfig
drivers/crypto/rockchip/rk3288_crypto.h
drivers/crypto/rockchip/rk3288_crypto_skcipher.c
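
Distilled from the diff below, here is a minimal sketch of the skcipher
fallback idiom the patch follows. The my_* names are illustrative, not
the driver's, and crypto_skcipher_set_reqsize() stands in for the diff's
direct tfm->reqsize assignment; treat this as a sketch of the pattern,
not the driver code itself.

#include <linux/err.h>
#include <crypto/internal/skcipher.h>

struct my_ctx {
        struct crypto_skcipher *fallback_tfm;   /* software implementation */
};

struct my_rctx {
        u32 mode;
        /* Must stay last: the fallback's own request context is laid
         * out directly after this embedded request. */
        struct skcipher_request fallback_req;
};

static int my_init_tfm(struct crypto_skcipher *tfm)
{
        struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
        const char *name = crypto_tfm_alg_name(&tfm->base);

        /* Ask for an implementation that does not itself need a fallback. */
        ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback_tfm))
                return PTR_ERR(ctx->fallback_tfm);

        /* Reserve our request context plus the fallback's sub-request. */
        crypto_skcipher_set_reqsize(tfm, sizeof(struct my_rctx) +
                                         crypto_skcipher_reqsize(ctx->fallback_tfm));
        return 0;
}

static int my_do_fallback(struct skcipher_request *areq, bool decrypt)
{
        struct my_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(areq));
        struct my_rctx *rctx = skcipher_request_ctx(areq);

        /* Re-target the embedded sub-request at the software tfm and
         * reuse the caller's buffers, IV and completion unchanged. */
        skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        return decrypt ? crypto_skcipher_decrypt(&rctx->fallback_req) :
                         crypto_skcipher_encrypt(&rctx->fallback_req);
}

Note that every setkey must also be mirrored to fallback_tfm, as the
diff does in rk_aes_setkey() and friends, so the software tfm always
holds the current key.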

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 51690e7..9432375 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -772,6 +772,10 @@ config CRYPTO_DEV_IMGTEC_HASH
 config CRYPTO_DEV_ROCKCHIP
        tristate "Rockchip's Cryptographic Engine driver"
        depends on OF && ARCH_ROCKCHIP
+       depends on PM
+       select CRYPTO_ECB
+       select CRYPTO_CBC
+       select CRYPTO_DES
        select CRYPTO_AES
        select CRYPTO_LIB_DES
        select CRYPTO_MD5
diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h
index 3e60e3d..dfff0e2 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.h
+++ b/drivers/crypto/rockchip/rk3288_crypto.h
@@ -246,10 +246,12 @@ struct rk_cipher_ctx {
        struct rk_crypto_info           *dev;
        unsigned int                    keylen;
        u8                              iv[AES_BLOCK_SIZE];
+       struct crypto_skcipher *fallback_tfm;
 };
 
 struct rk_cipher_rctx {
        u32                             mode;
+       struct skcipher_request fallback_req;   // keep at the end
 };
 
 enum alg_type {
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index bbd0bf5..eac5bba 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
 
 #define RK_CRYPTO_DEC                  BIT(0)
 
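+/*
+ * The engine needs 32-bit aligned scatterlist offsets, whole cipher
+ * blocks per entry and matching src/dst entry lengths; anything else,
+ * including a zero-length request, must take the software fallback.
+ */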
+static bool rk_cipher_need_fallback(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       unsigned int bs = crypto_skcipher_blocksize(tfm);
+       struct scatterlist *sgs, *sgd;
+       unsigned int stodo, dtodo, len;
+
+       if (!req->cryptlen)
+               return true;
+
+       len = req->cryptlen;
+       sgs = req->src;
+       sgd = req->dst;
+       while (sgs && sgd) {
+               if (!IS_ALIGNED(sgs->offset, sizeof(u32)))
+                       return true;
+               if (!IS_ALIGNED(sgd->offset, sizeof(u32)))
+                       return true;
+               stodo = min(len, sgs->length);
+               if (stodo % bs)
+                       return true;
+               dtodo = min(len, sgd->length);
+               if (dtodo % bs)
+                       return true;
+               if (stodo != dtodo)
+                       return true;
+               len -= stodo;
+               sgs = sg_next(sgs);
+               sgd = sg_next(sgd);
+       }
+       return false;
+}
+
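+/*
+ * Hand the whole request to the software implementation, reusing the
+ * caller's buffers, IV and completion callback.
+ */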
+static int rk_cipher_fallback(struct skcipher_request *areq)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+       struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
+       struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
+       int err;
+
+       skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+       skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+                                     areq->base.complete, areq->base.data);
+       skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+                                  areq->cryptlen, areq->iv);
+       if (rctx->mode & RK_CRYPTO_DEC)
+               err = crypto_skcipher_decrypt(&rctx->fallback_req);
+       else
+               err = crypto_skcipher_encrypt(&rctx->fallback_req);
+       return err;
+}
+
 static void rk_crypto_complete(struct crypto_async_request *base, int err)
 {
        if (base->complete)
@@ -22,10 +79,10 @@ static void rk_crypto_complete(struct crypto_async_request *base, int err)
 static int rk_handle_req(struct rk_crypto_info *dev,
                         struct skcipher_request *req)
 {
-       if (!IS_ALIGNED(req->cryptlen, dev->align_size))
-               return -EINVAL;
-       else
-               return dev->enqueue(dev, &req->base);
+       if (rk_cipher_need_fallback(req))
+               return rk_cipher_fallback(req);
+
+       return dev->enqueue(dev, &req->base);
 }
 
 static int rk_aes_setkey(struct crypto_skcipher *cipher,
@@ -39,7 +96,8 @@ static int rk_aes_setkey(struct crypto_skcipher *cipher,
                return -EINVAL;
        ctx->keylen = keylen;
        memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
-       return 0;
+
+       return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
 }
 
 static int rk_des_setkey(struct crypto_skcipher *cipher,
@@ -54,7 +112,8 @@ static int rk_des_setkey(struct crypto_skcipher *cipher,
 
        ctx->keylen = keylen;
        memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
-       return 0;
+
+       return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
 }
 
 static int rk_tdes_setkey(struct crypto_skcipher *cipher,
@@ -69,7 +128,7 @@ static int rk_tdes_setkey(struct crypto_skcipher *cipher,
 
        ctx->keylen = keylen;
        memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
-       return 0;
+       return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
 }
 
 static int rk_aes_ecb_encrypt(struct skcipher_request *req)
@@ -394,6 +453,7 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
 {
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+       const char *name = crypto_tfm_alg_name(&tfm->base);
        struct rk_crypto_tmp *algt;
 
        algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
@@ -407,6 +467,16 @@ static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
        if (!ctx->dev->addr_vir)
                return -ENOMEM;
 
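+       /* Request a software tfm that does not itself need a fallback. */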
+       ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback_tfm)) {
+               dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+                       name, PTR_ERR(ctx->fallback_tfm));
+               return PTR_ERR(ctx->fallback_tfm);
+       }
+
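+       /* Our request context plus the fallback's own sub-request. */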
+       tfm->reqsize = sizeof(struct rk_cipher_rctx) +
+               crypto_skcipher_reqsize(ctx->fallback_tfm);
+
        return 0;
 }
 
@@ -415,6 +485,7 @@ static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        free_page((unsigned long)ctx->dev->addr_vir);
+       crypto_free_skcipher(ctx->fallback_tfm);
 }
 
 struct rk_crypto_tmp rk_ecb_aes_alg = {
@@ -423,7 +494,7 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
                .base.cra_name          = "ecb(aes)",
                .base.cra_driver_name   = "ecb-aes-rk",
                .base.cra_priority      = 300,
-               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask     = 0x0f,
@@ -445,7 +516,7 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
                .base.cra_name          = "cbc(aes)",
                .base.cra_driver_name   = "cbc-aes-rk",
                .base.cra_priority      = 300,
-               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask     = 0x0f,
@@ -468,7 +539,7 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
                .base.cra_name          = "ecb(des)",
                .base.cra_driver_name   = "ecb-des-rk",
                .base.cra_priority      = 300,
-               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = DES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask     = 0x07,
@@ -490,7 +561,7 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
                .base.cra_name          = "cbc(des)",
                .base.cra_driver_name   = "cbc-des-rk",
                .base.cra_priority      = 300,
-               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = DES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask     = 0x07,
@@ -513,7 +584,7 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
                .base.cra_name          = "ecb(des3_ede)",
                .base.cra_driver_name   = "ecb-des3-ede-rk",
                .base.cra_priority      = 300,
-               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = DES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask     = 0x07,
@@ -535,7 +606,7 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
                .base.cra_name          = "cbc(des3_ede)",
                .base.cra_driver_name   = "cbc-des3-ede-rk",
                .base.cra_priority      = 300,
-               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = DES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct rk_cipher_ctx),
                .base.cra_alignmask     = 0x07,