crypto: sun8i-ce - split into prepare/run/unprepare
author    Corentin Labbe <clabbe@baylibre.com>
          Fri, 18 Sep 2020 07:23:07 +0000 (07:23 +0000)
committer Herbert Xu <herbert@gondor.apana.org.au>
          Fri, 25 Sep 2020 07:48:18 +0000 (17:48 +1000)
This patch splits do_one_request into three parts.
Prepare will handle all DMA mapping and initialisation of the task
structure.
Unprepare will clean up all DMA mappings.
And do_one_request will be limited to just executing the task.

Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
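
For context, here is a minimal driver-side sketch of the crypto_engine callback
model this split relies on. The example_* names are invented for illustration;
struct crypto_engine_ctx, struct crypto_engine_op and
crypto_finalize_skcipher_request() are the framework API as it existed at the
time of this commit:

    #include <crypto/engine.h>
    #include <crypto/skcipher.h>

    /* Illustrative stand-ins for the driver's real callbacks. */
    static int example_prepare(struct crypto_engine *engine, void *areq)
    {
            /* map DMA buffers and fill the hardware task descriptor */
            return 0;
    }

    static int example_run(struct crypto_engine *engine, void *areq)
    {
            struct skcipher_request *req =
                    container_of(areq, struct skcipher_request, base);

            /* start the hardware, then hand the result back to the engine */
            crypto_finalize_skcipher_request(engine, req, 0);
            return 0;
    }

    static int example_unprepare(struct crypto_engine *engine, void *areq)
    {
            /* undo every mapping done in example_prepare() */
            return 0;
    }

    /* The engine finds these callbacks through the tfm context, so
     * struct crypto_engine_ctx must be its first member (as enginectx
     * is in sun8i_cipher_tfm_ctx). */
    static void example_wire_callbacks(struct crypto_engine_ctx *enginectx)
    {
            enginectx->op.prepare_request   = example_prepare;
            enginectx->op.do_one_request    = example_run;
            enginectx->op.unprepare_request = example_unprepare;
    }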

diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index f699b13..cae307e 100644
@@ -75,8 +75,9 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
        return err;
 }
 
-static int sun8i_ce_cipher(struct skcipher_request *areq)
+static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
 {
+       struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun8i_ce_dev *ce = op->ce;
@@ -87,7 +88,6 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
        struct ce_task *cet;
        struct scatterlist *sg;
        unsigned int todo, len, offset, ivsize;
-       dma_addr_t addr_iv = 0, addr_key = 0;
        u32 common, sym;
        int flow, i;
        int nr_sgs = 0;
@@ -140,13 +140,13 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
        cet->t_sym_ctl = cpu_to_le32(sym);
        cet->t_asym_ctl = 0;
 
-       addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
-       cet->t_key = cpu_to_le32(addr_key);
-       if (dma_mapping_error(ce->dev, addr_key)) {
+       rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+       if (dma_mapping_error(ce->dev, rctx->addr_key)) {
                dev_err(ce->dev, "Cannot DMA MAP KEY\n");
                err = -EFAULT;
                goto theend;
        }
+       cet->t_key = cpu_to_le32(rctx->addr_key);
 
        ivsize = crypto_skcipher_ivsize(tfm);
        if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
@@ -167,14 +167,14 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
                                                 offset, ivsize, 0);
                }
                memcpy(rctx->bounce_iv, areq->iv, ivsize);
-               addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
-                                        DMA_TO_DEVICE);
-               cet->t_iv = cpu_to_le32(addr_iv);
-               if (dma_mapping_error(ce->dev, addr_iv)) {
+               rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
+                                              DMA_TO_DEVICE);
+               if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
                        dev_err(ce->dev, "Cannot DMA MAP IV\n");
                        err = -ENOMEM;
                        goto theend_iv;
                }
+               cet->t_iv = cpu_to_le32(rctx->addr_iv);
        }
 
        if (areq->src == areq->dst) {
@@ -234,7 +234,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
        }
 
        chan->timeout = areq->cryptlen;
-       err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+       rctx->nr_sgs = nr_sgs;
+       rctx->nr_sgd = nr_sgd;
+       return 0;
 
 theend_sgs:
        if (areq->src == areq->dst) {
@@ -247,9 +249,8 @@ theend_sgs:
 
 theend_iv:
        if (areq->iv && ivsize > 0) {
-               if (addr_iv)
-                       dma_unmap_single(ce->dev, addr_iv, rctx->ivlen,
-                                        DMA_TO_DEVICE);
+               if (rctx->addr_iv)
+                       dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
                offset = areq->cryptlen - ivsize;
                if (rctx->op_dir & CE_DECRYPTION) {
                        memcpy(areq->iv, rctx->backup_iv, ivsize);
@@ -262,19 +263,69 @@ theend_iv:
        }
 
 theend_key:
-       dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
+       dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 
 theend:
        return err;
 }
 
-static int sun8i_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
 {
-       int err;
        struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+       struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+       struct sun8i_ce_dev *ce = op->ce;
+       struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+       int flow, err;
 
-       err = sun8i_ce_cipher(breq);
+       flow = rctx->flow;
+       err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
        crypto_finalize_skcipher_request(engine, breq, err);
+       return 0;
+}
+
+static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+{
+       struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+       struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+       struct sun8i_ce_dev *ce = op->ce;
+       struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+       struct sun8i_ce_flow *chan;
+       struct ce_task *cet;
+       unsigned int ivsize, offset;
+       int nr_sgs = rctx->nr_sgs;
+       int nr_sgd = rctx->nr_sgd;
+       int flow;
+
+       flow = rctx->flow;
+       chan = &ce->chanlist[flow];
+       cet = chan->tl;
+       ivsize = crypto_skcipher_ivsize(tfm);
+
+       if (areq->src == areq->dst) {
+               dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
+       } else {
+               if (nr_sgs > 0)
+                       dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
+               dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
+       }
+
+       if (areq->iv && ivsize > 0) {
+               if (cet->t_iv)
+                       dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
+               offset = areq->cryptlen - ivsize;
+               if (rctx->op_dir & CE_DECRYPTION) {
+                       memcpy(areq->iv, rctx->backup_iv, ivsize);
+                       kfree_sensitive(rctx->backup_iv);
+               } else {
+                       scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+                                                ivsize, 0);
+               }
+               kfree(rctx->bounce_iv);
+       }
+
+       dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
 
        return 0;
 }
@@ -346,9 +397,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
                 crypto_tfm_alg_driver_name(&sktfm->base),
                 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
 
-       op->enginectx.op.do_one_request = sun8i_ce_handle_cipher_request;
-       op->enginectx.op.prepare_request = NULL;
-       op->enginectx.op.unprepare_request = NULL;
+       op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
+       op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
+       op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
 
        err = pm_runtime_get_sync(op->ce->dev);
        if (err < 0)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index f5555c4..c305332 100644
@@ -182,6 +182,10 @@ struct sun8i_ce_dev {
  * @backup_iv:         buffer which contain the next IV to store
  * @bounce_iv:         buffer which contain the IV
  * @ivlen:             size of bounce_iv
+ * @nr_sgs:            The number of source SG (as given by dma_map_sg())
+ * @nr_sgd:            The number of destination SG (as given by dma_map_sg())
+ * @addr_iv:           The IV addr returned by dma_map_single, need to unmap later
+ * @addr_key:          The key addr returned by dma_map_single, need to unmap later
  * @fallback_req:      request struct for invoking the fallback skcipher TFM
  */
 struct sun8i_cipher_req_ctx {
@@ -190,6 +194,10 @@ struct sun8i_cipher_req_ctx {
        void *backup_iv;
        void *bounce_iv;
        unsigned int ivlen;
+       int nr_sgs;
+       int nr_sgd;
+       dma_addr_t addr_iv;
+       dma_addr_t addr_key;
        struct skcipher_request fallback_req;   // keep at the end
 };