u64 len[2];
u64 processed[2];
- u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
dma_addr_t cache_dma;
unsigned int cache_sz;
- u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
};
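The doubled buffers let an HMAC request keep up to two blocks cached instead of one (the trailing full block plus any partial tail). The same limit is recomputed at each call site below; as a minimal sketch of that repeated pattern (safexcel_ahash_cache_max() is a hypothetical helper, not part of this patch):

static inline u32 safexcel_ahash_cache_max(struct safexcel_ahash_req *req,
					   struct crypto_ahash *ahash)
{
	/* One block for plain hashes, two for HMAC, matching the
	 * SHA512_BLOCK_SIZE << 1 sizing of the buffers above.
	 */
	u32 cache_max = crypto_ahash_blocksize(ahash);

	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
		cache_max <<= 1;

	return cache_max;
}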
static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
cdesc->control_data.control0 |= ctx->alg;
cdesc->control_data.control0 |= req->digest;
+ if (!req->finish)
+ cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
if (req->processed[0] || req->processed[1]) {
if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
}
- if (!req->finish)
- cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
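With the move above, the no-finish bit applies to every digest type rather than only CONTEXT_CONTROL_DIGEST_PRECOMPUTED, presumably so that non-final HMAC requests get the bit as well. Schematically, the control word ends up as (a paraphrase, not driver code):

	/* control0 = ctx->alg | req->digest
	 *          | (req->finish ? 0 : CONTEXT_CONTROL_NO_FINISH_HASH)
	 *          | (restarting a precomputed digest ?
	 *             CONTEXT_CONTROL_RESTART_HASH : 0);
	 */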
/*
 * Copy the input digest if needed, and set up the context
 * fields. Do this now as we need it to set up the first command
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
struct safexcel_result_desc *rdesc;
struct scatterlist *sg;
- int i, extra, n_cdesc = 0, ret = 0;
- u64 queued, len, cache_len;
+ int i, extra = 0, n_cdesc = 0, ret = 0;
+ u64 queued, len, cache_len, cache_max;
+
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
queued = len = safexcel_queued_len(req);
- if (queued <= crypto_ahash_blocksize(ahash))
+ if (queued <= cache_max)
cache_len = queued;
else
cache_len = queued - areq->nbytes;
*/
extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
+ extra < crypto_ahash_blocksize(ahash))
+ extra += crypto_ahash_blocksize(ahash);
+
/* If this is not the last request and the queued data
 * is a multiple of a block, cache the last block for now.
 */
queued -= extra;
len -= extra;
-
- if (!queued) {
- *commands = 0;
- *results = 0;
- return 0;
- }
}
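A worked example of the tail split above, assuming hmac(sha256) (64-byte blocks) and a request that is not the last one:

	/* queued = 202 bytes:
	 *
	 *   extra  = 202 & 63 = 10;   // partial tail block
	 *   extra += 64;              // HMAC: extra is always < 64 here,
	 *                             // so a full block is held back too
	 *   queued = 202 - 74 = 128;  // two full blocks go to the engine
	 *
	 * 74 bytes stay cached (hence the two-block buffers), giving the
	 * final() call a full block to close the inner hash with.
	 */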
/* Add a command descriptor for the cached data, if any */
/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when at least one full block of data is queued.
 */
-static int safexcel_ahash_cache(struct ahash_request *areq)
+static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
u64 queued, cache_len;
/* queued: everything accepted by the driver which will be handled by
* If there aren't enough bytes to proceed (less than a
* block size), cache the data until we have enough.
*/
- if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+ if (cache_len + areq->nbytes <= cache_max) {
sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
req->cache + cache_len,
areq->nbytes, 0);
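For reference, sg_pcopy_to_buffer(sgl, nents, buf, buflen, skip) copies buflen bytes out of a scatterlist into a linear buffer, starting skip bytes into the list; here it appends the whole payload of this update after the bytes already cached:

	/* Append areq->nbytes bytes at req->cache + cache_len, reading
	 * from offset 0 of the request's scatterlist. This fits because
	 * cache_len + areq->nbytes <= cache_max was checked above.
	 */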
{
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+ u32 cache_max;
/* If the request is 0 length, do nothing */
if (!areq->nbytes)
if (req->len[0] < areq->nbytes)
req->len[1]++;
- safexcel_ahash_cache(areq);
+ cache_max = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_max <<= 1;
+
+ safexcel_ahash_cache(areq, cache_max);
/*
* We're not doing partial updates when performing an hmac request.
return safexcel_ahash_enqueue(areq);
if (!req->last_req &&
- safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
+ safexcel_queued_len(req) > cache_max)
return safexcel_ahash_enqueue(areq);
return 0;
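To make the new threshold concrete, assume hmac(sha256) again (cache_max = 128) and a transform that sees two non-final updates of 100 bytes each:

	/* update #1: queued = 100 <= 128 -> fully cached, nothing sent
	 * update #2: queued = 200 >  128 -> request enqueued; send() then
	 *            carves off extra = (200 & 63) + 64 = 72 bytes for the
	 *            next round and pushes the remaining 128 to the engine
	 */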
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
struct safexcel_ahash_export_state *export = out;
+ u32 cache_sz;
+
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
export->len[0] = req->len[0];
export->len[1] = req->len[1];
export->digest = req->digest;
memcpy(export->state, req->state, req->state_sz);
- memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+ memcpy(export->cache, req->cache, cache_sz);
return 0;
}
struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
struct safexcel_ahash_req *req = ahash_request_ctx(areq);
const struct safexcel_ahash_export_state *export = in;
+ u32 cache_sz;
int ret;
ret = crypto_ahash_init(areq);
if (ret)
return ret;
+ cache_sz = crypto_ahash_blocksize(ahash);
+ if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+ cache_sz <<= 1;
+
req->len[0] = export->len[0];
req->len[1] = export->len[1];
req->processed[0] = export->processed[0];
req->digest = export->digest;
- memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+ memcpy(req->cache, export->cache, cache_sz);
memcpy(req->state, export->state, req->state_sz);
return 0;
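Because import now restores up to two blocks of cached data, a partial HMAC can be round-tripped through the generic ahash API without losing the tail. A hedged usage sketch (error handling omitted; req and req2 are hypothetical requests on the same tfm):

	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

	crypto_ahash_update(req);         /* may leave up to 2 blocks cached */
	crypto_ahash_export(req, state);  /* saves len[], state, cache_sz bytes */
	crypto_ahash_import(req2, state); /* restores them into a fresh request */
	crypto_ahash_final(req2);         /* finishes using the restored cache */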