u8 buffer[HASH_BUFLEN] __aligned(4);
/* hash state */
- u32 *hw_context;
+ u32 hw_context[3 + HASH_CSR_REGISTER_NUMBER];
};
struct stm32_hash_request_ctx {
static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct stm32_hash_state *state = &rctx->state;
+ u32 *preg = state->hw_context;
int bufcnt, err = 0, final;
+ int i;
dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
if (final) {
bufcnt = state->bufcnt;
state->bufcnt = 0;
- err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
+ return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
}
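+ /* Not a final update: save the hardware context so the next request can restore it */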
+ if (!(hdev->flags & HASH_FLAGS_INIT))
+ return 0;
+
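+ /* Wait for the core to go idle before reading its registers back */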
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
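+ /* Save IMR, STR, CR and the CSR bank into the per-request state (IMR is skipped on ux500) */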
+ if (!hdev->pdata->ux500)
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ *preg++ = stm32_hash_read(hdev, HASH_STR);
+ *preg++ = stm32_hash_read(hdev, HASH_CR);
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
+
+ state->flags |= HASH_FLAGS_INIT;
+
return err;
}
if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
stm32_hash_copy_hash(req);
err = stm32_hash_finish(req);
- hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
- HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
- HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
- HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
- HASH_FLAGS_HMAC_KEY);
}
pm_runtime_mark_last_busy(hdev->dev);
crypto_finalize_hash_request(hdev->engine, req, err);
}
-static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
- struct stm32_hash_request_ctx *rctx)
-{
- pm_runtime_get_sync(hdev->dev);
-
- if (!(HASH_FLAGS_INIT & hdev->flags)) {
- stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
- stm32_hash_write(hdev, HASH_STR, 0);
- stm32_hash_write(hdev, HASH_DIN, 0);
- stm32_hash_write(hdev, HASH_IMR, 0);
- }
-
- return 0;
-}
-
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
-
static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
struct ahash_request *req)
{
return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}
-static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
+static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
struct ahash_request *req = container_of(areq, struct ahash_request,
base);
struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_request_ctx *rctx;
+ struct stm32_hash_state *state = &rctx->state;
+ int err = 0;
if (!hdev)
return -ENODEV;
- hdev->req = req;
-
- rctx = ahash_request_ctx(req);
-
dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
rctx->op, req->nbytes);
- return stm32_hash_hw_init(hdev, rctx);
-}
+ pm_runtime_get_sync(hdev->dev);
-static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
-{
- struct ahash_request *req = container_of(areq, struct ahash_request,
- base);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_request_ctx *rctx;
- int err = 0;
+ hdev->req = req;
+ hdev->flags = 0;
+
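+ /* Restore the hardware context saved by a previous request, if there is one */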
+ if (state->flags & HASH_FLAGS_INIT) {
+ u32 *preg = rctx->state.hw_context;
+ u32 reg;
+ int i;
+
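+ /* Write back IMR, STR and CR (re-asserting HASH_CR_INIT), then reload the CSR bank */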
+ if (!hdev->pdata->ux500)
+ stm32_hash_write(hdev, HASH_IMR, *preg++);
+ stm32_hash_write(hdev, HASH_STR, *preg++);
+ stm32_hash_write(hdev, HASH_CR, *preg);
+ reg = *preg++ | HASH_CR_INIT;
+ stm32_hash_write(hdev, HASH_CR, reg);
- if (!hdev)
- return -ENODEV;
+ for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+ stm32_hash_write(hdev, HASH_CSR(i), *preg++);
- hdev->req = req;
+ hdev->flags |= HASH_FLAGS_INIT;
- rctx = ahash_request_ctx(req);
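+ /* Reflect the HMAC state of the request in the device flags */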
+ if (state->flags & HASH_FLAGS_HMAC)
+ hdev->flags |= HASH_FLAGS_HMAC |
+ HASH_FLAGS_HMAC_KEY;
+ }
if (rctx->op == HASH_OP_UPDATE)
err = stm32_hash_update_req(hdev);
static int stm32_hash_export(struct ahash_request *req, void *out)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_state *state = &rctx->state;
- u32 *preg;
- unsigned int i;
- int ret;
-
- pm_runtime_get_sync(hdev->dev);
-
- ret = stm32_hash_wait_busy(hdev);
- if (ret)
- return ret;
-
- state->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
- sizeof(u32), GFP_KERNEL);
- preg = state->hw_context;
-
- if (!hdev->pdata->ux500)
- *preg++ = stm32_hash_read(hdev, HASH_IMR);
- *preg++ = stm32_hash_read(hdev, HASH_STR);
- *preg++ = stm32_hash_read(hdev, HASH_CR);
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
- *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
-
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
- memcpy(out, rctx, sizeof(*rctx));
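+ /* The whole hardware context already lives in rctx->state, so export is a plain copy */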
+ memcpy(out, &rctx->state, sizeof(rctx->state));
return 0;
}
static int stm32_hash_import(struct ahash_request *req, const void *in)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
- struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
- struct stm32_hash_state *state = &rctx->state;
- const u32 *preg = in;
- u32 reg;
- unsigned int i;
-
- memcpy(rctx, in, sizeof(*rctx));
-
- preg = state->hw_context;
-
- pm_runtime_get_sync(hdev->dev);
-
- if (!hdev->pdata->ux500)
- stm32_hash_write(hdev, HASH_IMR, *preg++);
- stm32_hash_write(hdev, HASH_STR, *preg++);
- stm32_hash_write(hdev, HASH_CR, *preg);
- reg = *preg++ | HASH_CR_INIT;
- stm32_hash_write(hdev, HASH_CR, reg);
-
- for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
- stm32_hash_write(hdev, HASH_CSR(i), *preg++);
-
- pm_runtime_mark_last_busy(hdev->dev);
- pm_runtime_put_autosuspend(hdev->dev);
- kfree(state->hw_context);
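+ /* Reinitialize the request, then overwrite its state with the imported copy */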
+ stm32_hash_init(req);
+ memcpy(&rctx->state, in, sizeof(rctx->state));
return 0;
}
ctx->flags |= HASH_FLAGS_HMAC;
ctx->enginectx.op.do_one_request = stm32_hash_one_request;
- ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
- ctx->enginectx.op.unprepare_request = NULL;
return stm32_hash_init_fallback(tfm);
}
.import = stm32_hash_import,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "stm32-md5",
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(md5)",
.cra_driver_name = "stm32-hmac-md5",
.import = stm32_hash_import,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "stm32-sha1",
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "stm32-hmac-sha1",
.import = stm32_hash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha224",
.cra_driver_name = "stm32-sha224",
.import = stm32_hash_import,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha224)",
.cra_driver_name = "stm32-hmac-sha224",
.import = stm32_hash_import,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "stm32-sha256",
.setkey = stm32_hash_setkey,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct stm32_hash_request_ctx),
+ .statesize = sizeof(struct stm32_hash_state),
.base = {
.cra_name = "hmac(sha256)",
.cra_driver_name = "stm32-hmac-sha256",