crypto: sun8i-ss - rework handling of IV
author Corentin Labbe <clabbe@baylibre.com>
Mon, 2 May 2022 20:19:14 +0000 (20:19 +0000)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 9 Jun 2022 08:23:11 +0000 (10:23 +0200)
[ Upstream commit 359e893e8af456be2fefabe851716237df289cbf ]

sun8i-ss fails to handle IVs when doing in-place decryption of multiple
SGs. It should back up the last ciphertext block of each source SG so it
can be used later as the IV.
At the same time, remove the per-request allocations used for storing
all the IVs.

Fixes: f08fcced6d00 ("crypto: allwinner - Add sun8i-ss cryptographic offloader")
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
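
The standalone sketch below (illustrative only, not part of this patch)
shows why the last ciphertext block of each source chunk has to be backed
up before in-place decryption: with CBC-style chaining it becomes the IV
for the next chunk, and in-place decryption overwrites it with plaintext.
Block size and data are made up for demonstration.

/*
 * Illustrative sketch only: back up per-chunk chaining IVs before an
 * in-place decryption overwrites the ciphertext.
 */
#include <stdio.h>
#include <string.h>

#define BLK 16

int main(void)
{
	unsigned char buf[4 * BLK];     /* pretend ciphertext: two chunks of two blocks */
	unsigned char saved_iv[2][BLK]; /* one backed-up IV per chunk */

	memset(buf, 0xC1, sizeof(buf));

	/* Save the last ciphertext block of each chunk before touching buf. */
	memcpy(saved_iv[0], buf + 1 * BLK, BLK); /* end of chunk 0: IV for chunk 1 */
	memcpy(saved_iv[1], buf + 3 * BLK, BLK); /* end of chunk 1: IV handed back to the caller */

	/* Simulate in-place decryption overwriting the ciphertext. */
	memset(buf, 0x00, sizeof(buf));

	/* The chaining values survive only because they were copied first. */
	printf("saved IV byte: 0x%02x, overwritten source byte: 0x%02x\n",
	       saved_iv[0][0], buf[1 * BLK]);
	return 0;
}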

index 554e400..70e2e6e 100644
@@ -93,6 +93,68 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
        return err;
 }
 
+static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+       struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+       struct sun8i_ss_dev *ss = op->ss;
+       struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+       struct scatterlist *sg = areq->src;
+       unsigned int todo, offset;
+       unsigned int len = areq->cryptlen;
+       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+       struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+       int i = 0;
+       u32 a;
+       int err;
+
+       rctx->ivlen = ivsize;
+       if (rctx->op_dir & SS_DECRYPTION) {
+               offset = areq->cryptlen - ivsize;
+               scatterwalk_map_and_copy(sf->biv, areq->src, offset,
+                                        ivsize, 0);
+       }
+
+       /* we need to copy all IVs from the source in case DMA is bi-directional */
+       while (sg && len) {
+               if (sg_dma_len(sg) == 0) {
+                       sg = sg_next(sg);
+                       continue;
+               }
+               if (i == 0)
+                       memcpy(sf->iv[0], areq->iv, ivsize);
+               a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
+               if (dma_mapping_error(ss->dev, a)) {
+                       memzero_explicit(sf->iv[i], ivsize);
+                       dev_err(ss->dev, "Cannot DMA MAP IV\n");
+                       err = -EFAULT;
+                       goto dma_iv_error;
+               }
+               rctx->p_iv[i] = a;
+               /* we only need to set up the remaining IVs for decryption */
+               if (rctx->op_dir & SS_ENCRYPTION)
+                       return 0;
+               todo = min(len, sg_dma_len(sg));
+               len -= todo;
+               i++;
+               if (i < MAX_SG) {
+                       offset = sg->length - ivsize;
+                       scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
+               }
+               rctx->niv = i;
+               sg = sg_next(sg);
+       }
+
+       return 0;
+dma_iv_error:
+       i--;
+       while (i >= 0) {
+               dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+               memzero_explicit(sf->iv[i], ivsize);
+               i--;
+       }
+       return err;
+}
+
 static int sun8i_ss_cipher(struct skcipher_request *areq)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -101,9 +163,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
        struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sun8i_ss_alg_template *algt;
+       struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
        struct scatterlist *sg;
        unsigned int todo, len, offset, ivsize;
-       void *backup_iv = NULL;
        int nr_sgs = 0;
        int nr_sgd = 0;
        int err = 0;
@@ -134,30 +196,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
 
        ivsize = crypto_skcipher_ivsize(tfm);
        if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
-               rctx->ivlen = ivsize;
-               rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
-               if (!rctx->biv) {
-                       err = -ENOMEM;
+               err = sun8i_ss_setup_ivs(areq);
+               if (err)
                        goto theend_key;
-               }
-               if (rctx->op_dir & SS_DECRYPTION) {
-                       backup_iv = kzalloc(ivsize, GFP_KERNEL);
-                       if (!backup_iv) {
-                               err = -ENOMEM;
-                               goto theend_key;
-                       }
-                       offset = areq->cryptlen - ivsize;
-                       scatterwalk_map_and_copy(backup_iv, areq->src, offset,
-                                                ivsize, 0);
-               }
-               memcpy(rctx->biv, areq->iv, ivsize);
-               rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
-                                           DMA_TO_DEVICE);
-               if (dma_mapping_error(ss->dev, rctx->p_iv)) {
-                       dev_err(ss->dev, "Cannot DMA MAP IV\n");
-                       err = -ENOMEM;
-                       goto theend_iv;
-               }
        }
        if (areq->src == areq->dst) {
                nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
@@ -243,21 +284,19 @@ theend_sgs:
        }
 
 theend_iv:
-       if (rctx->p_iv)
-               dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
-                                DMA_TO_DEVICE);
-
        if (areq->iv && ivsize > 0) {
-               if (rctx->biv) {
-                       offset = areq->cryptlen - ivsize;
-                       if (rctx->op_dir & SS_DECRYPTION) {
-                               memcpy(areq->iv, backup_iv, ivsize);
-                               kfree_sensitive(backup_iv);
-                       } else {
-                               scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
-                                                        ivsize, 0);
-                       }
-                       kfree(rctx->biv);
+               for (i = 0; i < rctx->niv; i++) {
+                       dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+                       memzero_explicit(sf->iv[i], ivsize);
+               }
+
+               offset = areq->cryptlen - ivsize;
+               if (rctx->op_dir & SS_DECRYPTION) {
+                       memcpy(areq->iv, sf->biv, ivsize);
+                       memzero_explicit(sf->biv, ivsize);
+               } else {
+                       scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+                                       ivsize, 0);
                }
        }
 
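
The dma_iv_error unwind in sun8i_ss_setup_ivs() above walks backwards over
every IV that was already DMA-mapped, unmapping and wiping each one before
returning the error. A minimal standalone sketch of that unwind idiom, using
hypothetical stand-in helpers rather than kernel API:

/* Illustrative sketch only: reverse unwind after a partial failure. */
#include <stdio.h>

#define NBUF 4

static int map_one(int i)
{
	return (i == 2) ? -1 : 0;	/* pretend the third mapping fails */
}

static void unmap_one(int i)
{
	printf("unmapped and wiped buffer %d\n", i);
}

int main(void)
{
	int i, err = 0;

	for (i = 0; i < NBUF; i++) {
		if (map_one(i)) {
			err = -1;
			goto unwind;
		}
	}
	return 0;

unwind:
	i--;			/* the failing index was never mapped */
	while (i >= 0) {
		unmap_one(i);
		i--;		/* without this the loop would never end */
	}
	return err;
}
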
index 319fe32..6575305 100644
@@ -66,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
                      const char *name)
 {
        int flow = rctx->flow;
+       unsigned int ivlen = rctx->ivlen;
        u32 v = SS_START;
        int i;
 
@@ -104,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx
                mutex_lock(&ss->mlock);
                writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);
 
-               if (i == 0) {
-                       if (rctx->p_iv)
-                               writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
-               } else {
-                       if (rctx->biv) {
-                               if (rctx->op_dir == SS_ENCRYPTION)
-                                       writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+               if (ivlen) {
+                       if (rctx->op_dir == SS_ENCRYPTION) {
+                               if (i == 0)
+                                       writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG);
                                else
-                                       writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+                                       writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG);
+                       } else {
+                               writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG);
                        }
                }
 
@@ -464,7 +464,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
  */
 static int allocate_flows(struct sun8i_ss_dev *ss)
 {
-       int i, err;
+       int i, j, err;
 
        ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
                                 GFP_KERNEL);
@@ -474,6 +474,18 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
        for (i = 0; i < MAXFLOW; i++) {
                init_completion(&ss->flows[i].complete);
 
+               ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+                                               GFP_KERNEL | GFP_DMA);
+               if (!ss->flows[i].biv)
+                       goto error_engine;
+
+               for (j = 0; j < MAX_SG; j++) {
+                       ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+                                                         GFP_KERNEL | GFP_DMA);
+                       if (!ss->flows[i].iv[j])
+                               goto error_engine;
+               }
+
                ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
                if (!ss->flows[i].engine) {
                        dev_err(ss->dev, "Cannot allocate engine\n");
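
For reference, a minimal standalone sketch of the per-step IV address
selection now done in sun8i_ss_run_task() above: encryption steps after the
first chain from the tail of the previous destination block, while
decryption always reads one of the pre-saved IVs. Names and values below
are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct step { uint32_t addr; uint32_t len; };	/* len in 32-bit words, as in t_dst */

static uint32_t iv_addr_for_step(int i, int encrypt, uint32_t ivlen,
				 const struct step *t_dst, const uint32_t *p_iv)
{
	if (encrypt && i > 0)
		return t_dst[i - 1].addr + t_dst[i - 1].len * 4 - ivlen;
	return p_iv[i];
}

int main(void)
{
	struct step t_dst[2] = { { 0x1000, 16 }, { 0x2000, 16 } };	/* two 64-byte blocks */
	uint32_t p_iv[2] = { 0x9000, 0x9010 };

	printf("encrypt, step 1: IV read from 0x%x\n",
	       (unsigned int)iv_addr_for_step(1, 1, 16, t_dst, p_iv));
	printf("decrypt, step 1: IV read from 0x%x\n",
	       (unsigned int)iv_addr_for_step(1, 0, 16, t_dst, p_iv));
	return 0;
}
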
index 2818868..57ada86 100644
@@ -121,11 +121,15 @@ struct sginfo {
  * @complete:  completion for the current task on this flow
  * @status:    set to 1 by interrupt if task is done
  * @stat_req:  number of request done by this flow
+ * @iv:                list of IVs to use for each step
+ * @biv:       buffer which contains the backed-up IV
  */
 struct sun8i_ss_flow {
        struct crypto_engine *engine;
        struct completion complete;
        int status;
+       u8 *iv[MAX_SG];
+       u8 *biv;
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
        unsigned long stat_req;
 #endif
@@ -164,28 +168,28 @@ struct sun8i_ss_dev {
  * @t_src:             list of mapped SGs with their size
  * @t_dst:             list of mapped SGs with their size
  * @p_key:             DMA address of the key
- * @p_iv:              DMA address of the IV
+ * @p_iv:              DMA address of the IVs
+ * @niv:               Number of IVs DMA mapped
  * @method:            current algorithm for this request
  * @op_mode:           op_mode for this request
  * @op_dir:            direction (encrypt vs decrypt) for this request
  * @flow:              the flow to use for this request
- * @ivlen:             size of biv
+ * @ivlen:             size of IVs
  * @keylen:            keylen for this request
- * @biv:               buffer which contain the IV
  * @fallback_req:      request struct for invoking the fallback skcipher TFM
  */
 struct sun8i_cipher_req_ctx {
        struct sginfo t_src[MAX_SG];
        struct sginfo t_dst[MAX_SG];
        u32 p_key;
-       u32 p_iv;
+       u32 p_iv[MAX_SG];
+       int niv;
        u32 method;
        u32 op_mode;
        u32 op_dir;
        int flow;
        unsigned int ivlen;
        unsigned int keylen;
-       void *biv;
        struct skcipher_request fallback_req;   // keep at the end
 };