jh7110_sec_write(sdev, JH7110_AES_MLEN1, data_len & 0xffffffff);
}
+/*
+ * Same validation as the static helper in crypto/ccm.c: byte 0 of a CCM
+ * IV carries L' = L - 1 (RFC 3610), where the length field L must be
+ * 2..8 octets, so only 1..7 is acceptable here.
+ */
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+ /* 2 <= L <= 8, so 1 <= L' <= 7. */
+ if (iv[0] < 1 || iv[0] > 7)
+ return -EINVAL;
+
+ return 0;
+}
+
static int jh7110_cryp_hw_write_iv(struct jh7110_sec_ctx *ctx, u32 *iv)
{
struct jh7110_sec_dev *sdev = ctx->sdev;
if (!is_gcm(rctx))
jh7110_sec_write(sdev, JH7110_AES_IV3, iv[3]);
-
- if (is_gcm(rctx))
+ else
if (jh7110_aes_wait_gcmdone(ctx))
return -ETIMEDOUT;
memset((void *)rctx->last_ctr, 0, sizeof(rctx->last_ctr));
jh7110_cryp_gcm_init(ctx);
- if (jh7110_aes_wait_gcmdone(ctx))
- return -ETIMEDOUT;
break;
case JH7110_AES_MODE_CCM:
struct jh7110_sec_dev *sdev = ctx->sdev;
struct jh7110_sec_request_ctx *rctx = ctx->rctx;
int loop, total_len, start_addr;
- int ret = 0;
total_len = AES_BLOCK_SIZE / sizeof(u32);
start_addr = JH7110_AES_NONCE0;
rctx->authsize, 0);
if (crypto_memneq(rctx->tag_in, rctx->tag_out, rctx->authsize))
- ret = -EBADMSG;
+ return -EBADMSG;
}
- return ret;
+ return 0;
}
static int jh7110_gcm_zero_message_data(struct jh7110_sec_ctx *ctx);
-static int jh7110_cryp_finish_req(struct jh7110_sec_ctx *ctx, int err)
+static void jh7110_cryp_finish_req(struct jh7110_sec_ctx *ctx, int err)
{
struct jh7110_sec_request_ctx *rctx = ctx->rctx;
err);
memset(ctx->key, 0, ctx->keylen);
-
- return err;
}
static bool jh7110_check_counter_overflow(struct jh7110_sec_ctx *ctx, size_t count)
while (total_len > 0) {
for (loop = 0; loop < 4; loop++, buffer++)
jh7110_sec_write(sdev, JH7110_AES_AESDIO0R, *buffer);
+
if (jh7110_aes_wait_busy(ctx)) {
dev_err(sdev->dev, "jh7110_aes_wait_busy error\n");
return -ETIMEDOUT;
int ret;
bool fragmented = false;
- if (unlikely(!rctx->total_in)) {
- dev_warn(sdev->dev, "No more data to process\n");
- return -EINVAL;
- }
-
sdev->cry_type = JH7110_AES_TYPE;
/* ctr counter overflow. */
unsigned int *buffer;
int total_len, loop;
- total_len = rctx->assoclen / sizeof(u32);
+ if (rctx->assoclen) {
+ total_len = rctx->assoclen;
+ total_len = (total_len & 0xf) ? (((total_len >> 4) + 1) << 2) : (total_len >> 2);
+ }
+
buffer = (unsigned int *)sdev->aes_data;
for (loop = 0; loop < total_len; loop += 4) {
int ret;
bool fragmented = false;
- if (unlikely(!rctx->total_in)) {
- dev_warn(sdev->dev, "No more data to process\n");
- return -EINVAL;
- }
-
sdev->cry_type = JH7110_AES_TYPE;
/* ctr counter overflow. */
}
rctx->bufcnt = data_len;
-
total += data_len;
+
if (is_ccm(rctx))
ret = jh7110_cryp_ccm_write_aad(ctx);
else
ret = jh7110_cryp_gcm_write_aad(ctx);
+
+ if (ret)
+ return ret;
}
total = 0;
ret = jh7110_cryp_write_out_dma(ctx);
else
ret = jh7110_cryp_write_out_cpu(ctx);
-
- if (ret)
- return ret;
}
rctx->offset += count;
if (ret)
return ret;
- ret = jh7110_cryp_finish_req(ctx, ret);
+ jh7110_cryp_finish_req(ctx, ret);
- return ret;
+ return 0;
}
static int jh7110_cryp_xcm_start(struct jh7110_sec_ctx *ctx, struct jh7110_sec_request_ctx *rctx)
if (ret)
return ret;
- ret = jh7110_cryp_finish_req(ctx, ret);
+ jh7110_cryp_finish_req(ctx, ret);
- return ret;
+ mutex_unlock(&ctx->sdev->lock);
+
+ return 0;
}
static int jh7110_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
{
struct jh7110_sec_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct jh7110_sec_request_ctx *rctx = skcipher_request_ctx(req);
struct jh7110_sec_dev *sdev = ctx->sdev;
+ unsigned int blocksize_align = crypto_skcipher_blocksize(tfm) - 1;
if (!sdev)
return -ENODEV;
rctx->flags = flags;
rctx->req_type = JH7110_ABLK_REQ;
+ if (is_ecb(rctx) || is_cbc(rctx))
+ if (req->cryptlen & (blocksize_align))
+ return -EINVAL;
+
return crypto_transfer_skcipher_request_to_engine(sdev->engine, req);
}
static int jh7110_cryp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
+ if (!key || !keylen)
+ return -EINVAL;
+
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_256)
return -EINVAL;
struct jh7110_sec_ctx *ctx = crypto_aead_ctx(tfm);
int ret = 0;
+ if (!key || !keylen)
+ return -EINVAL;
+
if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
keylen != AES_KEYSIZE_256) {
return -EINVAL;
static int jh7110_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
+ struct jh7110_sec_ctx *ctx = crypto_aead_ctx(tfm);
+ int ret;
+
+ /*
+ * Validate before committing: the previous order wrote an invalid
+ * authsize into the tfm (and the fallback) before rejecting it.
+ */
+ ret = crypto_gcm_check_authsize(authsize);
+ if (ret)
+ return ret;
+
+ tfm->authsize = authsize;
+
+ /* NOTE(review): fallback presumably mirrors the tfm's tag length —
+ * confirm the fallback path reads ->authsize rather than being told
+ * via crypto_aead_setauthsize(). */
+ if (ctx->fallback.aead)
+ ctx->fallback.aead->authsize = authsize;
+
- return crypto_gcm_check_authsize(authsize);
+ return 0;
}
static int jh7110_cryp_aes_ccm_encrypt(struct aead_request *req)
{
+ int ret;
+
+ /* RFC 3610: first IV byte holds L' = L - 1; reject anything
+ * outside 1..7 before touching the hardware. */
+ ret = crypto_ccm_check_iv(req->iv);
+ if (ret)
+ return ret;
+
 return jh7110_cryp_aead_crypt(req, JH7110_AES_MODE_CCM | FLG_ENCRYPT);
}
static int jh7110_cryp_aes_ccm_decrypt(struct aead_request *req)
{
+ int ret;
+
+ /* Same RFC 3610 nonce-format check as the encrypt path: byte 0
+ * must encode 1 <= L' <= 7. */
+ ret = crypto_ccm_check_iv(req->iv);
+ if (ret)
+ return ret;
+
 return jh7110_cryp_aead_crypt(req, JH7110_AES_MODE_CCM);
}
static int jh7110_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
{
struct skcipher_request *req = container_of(areq,
- struct skcipher_request,
- base);
+ struct skcipher_request,
+ base);
struct jh7110_sec_request_ctx *rctx = skcipher_request_ctx(req);
struct jh7110_sec_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
struct jh7110_sec_request_ctx *rctx = aead_request_ctx(req);
struct jh7110_sec_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
struct jh7110_sec_dev *sdev = ctx->sdev;
- int ret;
if (!sdev)
return -ENODEV;
/* No input data to process: get tag and finish */
jh7110_gcm_zero_message_data(ctx);
jh7110_cryp_finish_req(ctx, 0);
- ret = 0;
- goto out;
+ mutex_unlock(&ctx->sdev->lock);
+ return 0;
}
- ret = jh7110_cryp_xcm_start(ctx, rctx);
-
- out:
- mutex_unlock(&ctx->sdev->lock);
-
- return ret;
+ return jh7110_cryp_xcm_start(ctx, rctx);
}
static struct skcipher_alg crypto_algs[] = {
static struct aead_alg aead_algs[] = {
{
- .setkey = jh7110_cryp_aes_aead_setkey,
- .setauthsize = jh7110_cryp_aes_gcm_setauthsize,
- .encrypt = jh7110_cryp_aes_gcm_encrypt,
- .decrypt = jh7110_cryp_aes_gcm_decrypt,
- .init = jh7110_cryp_aes_aead_init,
- .exit = jh7110_cryp_aes_aead_exit,
- .ivsize = GCM_AES_IV_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = jh7110_cryp_aes_aead_setkey,
+ .setauthsize = jh7110_cryp_aes_gcm_setauthsize,
+ .encrypt = jh7110_cryp_aes_gcm_encrypt,
+ .decrypt = jh7110_cryp_aes_gcm_decrypt,
+ .init = jh7110_cryp_aes_aead_init,
+ .exit = jh7110_cryp_aes_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
.base = {
- .cra_name = "gcm(aes)",
- .cra_driver_name = "jh7110-gcm-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct jh7110_sec_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "jh7110-gcm-aes",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct jh7110_sec_ctx),
+ .cra_alignmask = 0xf,
+ .cra_module = THIS_MODULE,
},
}, {
.setkey = jh7110_cryp_aes_aead_setkey,