// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * The datasheet can be found in Documentation/arch/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ce.h"

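/*
 * Illustrative sketch, not part of the driver: how a kernel consumer would
 * reach this offloader through the generic ahash API. The function name
 * below is hypothetical; the calls themselves (crypto_alloc_ahash() and
 * friends) are the standard kernel crypto interfaces.
 */
static int __maybe_unused sun8i_ce_hash_usage_sketch(void)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	static const u8 msg[] = "abcd";
	u8 digest[SHA256_DIGEST_SIZE];
	int err;

	/* The core picks the highest-priority "sha256" provider, which can
	 * be this driver on Allwinner SoCs. */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, msg, sizeof(msg) - 1);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, sizeof(msg) - 1);

	/* digest() may complete asynchronously on the CE; wait for it. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
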
int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ce_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	op->ce = algt->ce;

	op->enginectx.op.do_one_request = sun8i_ce_hash_run;

	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;

error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}

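/*
 * The CE cannot resume a partially hashed message, so all stateful ahash
 * entry points below (init/update/final/finup, export/import) simply
 * mirror the request onto the software fallback transform; only digest()
 * can take the hardware path.
 */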
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}

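/*
 * Check whether the request fits the engine's DMA constraints: a non-empty
 * message, at most MAX_SG - 1 source entries (one slot is kept for the
 * padding SG), and every entry word-aligned with a length that is a
 * multiple of 4 bytes. Each early return bumps the matching debugfs
 * fallback counter.
 */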
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);

	if (areq->nbytes == 0) {
		algt->stat_fb_len0++;
		return true;
	}
	/* we need to reserve one SG slot for the padding */
	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
		algt->stat_fb_maxsg++;
		return true;
	}
	sg = areq->src;
	while (sg) {
		if (sg->length % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

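/*
 * One-shot digest entry point: requests that cannot satisfy the DMA
 * constraints are redirected to the fallback, everything else is queued
 * on a CE flow and handled asynchronously by sun8i_ce_hash_run().
 */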
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ce_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ce_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

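/*
 * Write the MD5/SHA trailer into 'buf' starting at word index 'padi': one
 * 0x80 byte (stored as a little-endian word, valid because the data length
 * is always a multiple of 4 here), zero fill up to the length field, then
 * the message length in bits: 64-bit LE for MD5, 64-bit BE for
 * SHA1/SHA224/SHA256, 128-bit BE for SHA384/SHA512. Worked example
 * (illustrative values): byte_count = 4 with bs = 64 gives fill = 60 and
 * min_fill = 12, so one 0x80 word, twelve zero words and a 64-bit length
 * of 32 bits; 4 data bytes + 60 pad bytes = one full 64-byte block.
 * Returns the number of 32-bit words written, or 0 on overflow.
 */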
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j = padi, k;
	__be64 *bebits;
	__le64 *lebits;

	buf[j++] = cpu_to_le32(0x80);
	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}
	if (fill < min_fill)
		fill += bs;
	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;
	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else if (bs == 64) {
		/* sha1 sha224 sha256 */
		bebits = (__be64 *)&buf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
	} else {
		/* sha384 sha512 */
		bebits = (__be64 *)&buf[j];
		*bebits = cpu_to_be64(byte_count >> 61);
		j += 2;
		bebits = (__be64 *)&buf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	return j;
}

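/*
 * Called by the crypto engine worker for each queued request: build one
 * ce_task with the source SG entries plus one extra SG holding the padding
 * from hash_pad(), point t_dst at a DMA-mapped bounce buffer for the
 * digest, run the flow, then copy the result back to the caller.
 */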
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf = NULL;
	int j, i, todo;
	void *result = NULL;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;
	int ns = sg_nents_for_len(areq->src, areq->nbytes);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	bs = algt->alg.hash.halg.base.cra_blocksize;
	digestsize = algt->alg.hash.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;
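
	/*
	 * The bump above matches the hardware: for SHA224/SHA384 the engine
	 * still writes the full SHA256/SHA512 state, so the result bounce
	 * buffer below is sized for the parent digest and the final
	 * memcpy() trims it to the advertised digest size.
	 */
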
	/* the padding could be up to two blocks. */
	buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto theend;
	}
	bf = (__le32 *)buf;
	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}
	flow = rctx->flow;
	chan = &ce->chanlist[flow];
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif
	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));
	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_hash[algt->ce_algo_id];
	common |= CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);

	nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}
	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}
	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
	cet->t_dst[0].addr = cpu_to_le32(addr_res);
	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
	if (dma_mapping_error(ce->dev, addr_res)) {
		dev_err(ce->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}

	byte_count = areq->nbytes;
	j = 0;
	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}
	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = cpu_to_le32(addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}
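
	/*
	 * t_dlen units differ between CE generations: variants with
	 * hash_t_dlen_in_bits set expect the total length (data plus
	 * padding) in bits, the others expect it in 32-bit words.
	 */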
	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	kfree(buf);
	kfree(result);
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}