engine->req = NULL;
mv_cesa_dequeue_req_unlocked(engine);
spin_unlock_bh(&engine->lock);
+ ctx->ops->complete(req);
ctx->ops->cleanup(req);
local_bh_disable();
req->complete(req, res);
* code)
* @step: launch the crypto operation on the next chunk
* @cleanup: cleanup the crypto request (release associated data)
+ * @complete: complete the request, i.e. copy result or context from SRAM when
+ * needed.
*/
struct mv_cesa_req_ops {
void (*prepare)(struct crypto_async_request *req,
		struct mv_cesa_engine *engine);
int (*process)(struct crypto_async_request *req, u32 status);
void (*step)(struct crypto_async_request *req);
void (*cleanup)(struct crypto_async_request *req);
+ void (*complete)(struct crypto_async_request *req);
};
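
For illustration only, not part of the patch: a minimal, self-contained sketch of the completion ordering the first hunk establishes, assuming a simplified model of the driver. The engine calls ->complete() while the operation context (SRAM contents or the last DMA descriptor) is still valid, then ->cleanup() to release that context, and only then the user's own completion callback. All names below (async_req, req_ops, engine_finish_req, the toy_* helpers) are made-up stand-ins, not the real cesa.h definitions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, simplified stand-ins for the kernel structures. */
struct async_req {
	void (*complete)(struct async_req *req, int err); /* user callback */
	uint32_t state[4];   /* "hardware" result, e.g. IV or partial digest */
	uint32_t result[4];  /* user-visible buffer the result is copied to */
};

struct req_ops {
	int  (*process)(struct async_req *req, uint32_t status);
	void (*complete)(struct async_req *req); /* new hook: copy result back */
	void (*cleanup)(struct async_req *req);  /* release operation context */
};

/*
 * Completion path mirroring the first hunk: once ->process() reports the
 * request is done, copy results back (->complete) while the context is
 * still live, release it (->cleanup), then notify the user (req->complete).
 */
static void engine_finish_req(const struct req_ops *ops,
			      struct async_req *req, uint32_t status)
{
	int res = ops->process(req, status);

	if (res == -EINPROGRESS)  /* more chunks to run, nothing to report */
		return;

	ops->complete(req);
	ops->cleanup(req);
	req->complete(req, res);
}

/* Toy implementation, only meant to exercise the call order above. */
static int toy_process(struct async_req *req, uint32_t status)
{
	(void)req;
	(void)status;
	return 0;  /* request fully processed */
}

static void toy_complete(struct async_req *req)
{
	/* Copy the internal result to the user-visible buffer. */
	memcpy(req->result, req->state, sizeof(req->result));
}

static void toy_cleanup(struct async_req *req)
{
	/* Drop the internal context; ->complete() must already have run. */
	memset(req->state, 0, sizeof(req->state));
}

static void toy_user_complete(struct async_req *req, int err)
{
	printf("request done, err=%d, result[0]=%08x\n",
	       err, (unsigned int)req->result[0]);
}

int main(void)
{
	static const struct req_ops ops = {
		.process  = toy_process,
		.complete = toy_complete,
		.cleanup  = toy_cleanup,
	};
	struct async_req req = {
		.complete = toy_user_complete,
		.state    = { 0xdeadbeef, 1, 2, 3 },
	};

	engine_finish_req(&ops, &req, 0);
	return 0;
}

The ordering matters because ->complete() may still need to read data that ->cleanup() releases, which is why the hunk above invokes complete before cleanup and before the async request's own callback.
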
/**
struct mv_cesa_ablkcipher_std_req *sreq = &creq->std;
struct mv_cesa_engine *engine = creq->base.engine;
size_t len;
- unsigned int ivsize;
len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			   sreq->size, sreq->offset);
if (sreq->offset < req->nbytes)
return -EINPROGRESS;
- ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
- memcpy_fromio(req->info,
- engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET, ivsize);
-
return 0;
}
mv_cesa_ablkcipher_cleanup(ablkreq);
}
+static void
+mv_cesa_ablkcipher_complete(struct crypto_async_request *req)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
+ struct mv_cesa_engine *engine = creq->base.engine;
+ unsigned int ivsize;
+
+ ivsize = crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq));
+
+ if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
+ struct mv_cesa_req *basereq;
+
+ basereq = &creq->base;
+ memcpy(ablkreq->info, basereq->chain.last->data, ivsize);
+ } else {
+ memcpy_fromio(ablkreq->info,
+ engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
+ ivsize);
+ }
+}
+
static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
.step = mv_cesa_ablkcipher_step,
.process = mv_cesa_ablkcipher_process,
.prepare = mv_cesa_ablkcipher_prepare,
.cleanup = mv_cesa_ablkcipher_req_cleanup,
+ .complete = mv_cesa_ablkcipher_complete,
};
static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
- struct mv_cesa_engine *engine = creq->base.engine;
- unsigned int digsize;
- int ret, i;
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
- ret = mv_cesa_dma_process(&creq->base, status);
- else
- ret = mv_cesa_ahash_std_process(ahashreq, status);
+ return mv_cesa_dma_process(&creq->base, status);
- if (ret == -EINPROGRESS)
- return ret;
+ return mv_cesa_ahash_std_process(ahashreq, status);
+}
+
+static void mv_cesa_ahash_complete(struct crypto_async_request *req)
+{
+ struct ahash_request *ahashreq = ahash_request_cast(req);
+ struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
+ struct mv_cesa_engine *engine = creq->base.engine;
+ unsigned int digsize;
+ int i;
digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
for (i = 0; i < digsize / 4; i++)
result[i] = cpu_to_be32(creq->state[i]);
}
}
-
- return ret;
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
.process = mv_cesa_ahash_process,
.prepare = mv_cesa_ahash_prepare,
.cleanup = mv_cesa_ahash_req_cleanup,
+ .complete = mv_cesa_ahash_complete,
};
static int mv_cesa_ahash_init(struct ahash_request *req,