// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>

#include "sec.h"
#include "sec_crypto.h"

#define SEC_PRIORITY		4001
#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)

/* SEC sqe(bd) bit-field offsets and masks */
#define SEC_DE_OFFSET		1
#define SEC_CIPHER_OFFSET	4
#define SEC_SCENE_OFFSET	3
#define SEC_DST_SGL_OFFSET	2
#define SEC_SRC_SGL_OFFSET	7
#define SEC_CKEY_OFFSET		9
#define SEC_CMODE_OFFSET	12
#define SEC_FLAG_OFFSET		7
#define SEC_FLAG_MASK		0x0780
#define SEC_TYPE_MASK		0x0F
#define SEC_DONE_MASK		0x0001

#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
#define SEC_SGL_SGE_NR		128
#define SEC_CTX_DEV(ctx)	(&(ctx)->sec->qm.pdev->dev)

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_active_devs;

/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
static inline int sec_get_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
			    ctx->hlf_q_num;

	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
		    ctx->hlf_q_num;
}

static inline void sec_put_queue_id(struct sec_ctx *ctx, struct sec_req *req)
{
	if (req->c_req.encrypt)
		atomic_dec(&ctx->enc_qcyclic);
	else
		atomic_dec(&ctx->dec_qcyclic);
}

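/*
 * Reserve a request id from the queue's cyclic IDR and publish the request
 * in the per-queue lookup table; the hardware echoes this id back in the
 * bd tag so the completion path can find the request again.
 */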
static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
	int req_id;

	mutex_lock(&qp_ctx->req_lock);
	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
				  0, QM_Q_DEPTH, GFP_ATOMIC);
	mutex_unlock(&qp_ctx->req_lock);
	if (req_id < 0) {
		dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
		return req_id;
	}

	req->qp_ctx = qp_ctx;
	qp_ctx->req_list[req_id] = req;

	return req_id;
}

static void sec_free_req_id(struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int req_id = req->req_id;

	if (req_id < 0 || req_id >= QM_Q_DEPTH) {
		dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
		return;
	}

	qp_ctx->req_list[req_id] = NULL;
	req->qp_ctx = NULL;

	mutex_lock(&qp_ctx->req_lock);
	idr_remove(&qp_ctx->req_idr, req_id);
	mutex_unlock(&qp_ctx->req_lock);
}

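/*
 * Completion callback, run by the QM layer for every finished sqe(bd):
 * look the request up by the bd tag, validate the DONE/FLAG fields of the
 * type2 bd, then unmap the buffers and complete the request.
 */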
static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
	struct sec_sqe *bd = resp;
	struct sec_req *req;
	u16 done, flag;
	u8 type;

	type = bd->type_cipher_auth & SEC_TYPE_MASK;
	if (type == SEC_BD_TYPE2) {
		req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
		req->err_type = bd->type2.error_type;

		done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
		flag = (le16_to_cpu(bd->type2.done_flag) &
			SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
		if (req->err_type || done != 0x1 || flag != 0x2)
			dev_err(SEC_CTX_DEV(req->ctx),
				"err_type[%d],done[%d],flag[%d]\n",
				req->err_type, done, flag);
	} else {
		pr_err("err bd type [%d]\n", type);
		return;
	}

	atomic64_inc(&req->ctx->sec->debug.dfx.recv_cnt);

	req->ctx->req_op->buf_unmap(req->ctx, req);

	req->ctx->req_op->callback(req->ctx, req);
}

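/*
 * Queue one sqe(bd) to hardware. On success this returns -EINPROGRESS for
 * a normally accepted request, -EBUSY for a request accepted while over
 * the fake-busy watermark (so the caller throttles), and -ENOBUFS when
 * the hardware queue itself rejected the bd.
 */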
static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	int ret;

	mutex_lock(&qp_ctx->req_lock);
	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
	mutex_unlock(&qp_ctx->req_lock);
	atomic64_inc(&ctx->sec->debug.dfx.send_cnt);

	if (ret == -EBUSY)
		return -ENOBUFS;

	if (!ret) {
		if (atomic_read(&req->fake_busy))
			ret = -EBUSY;
		else
			ret = -EINPROGRESS;
	}

	return ret;
}

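/*
 * Set up one queue-pair context: create and start a QM queue pair, build
 * the request id table, and allocate the hardware SGL pools plus any
 * algorithm-specific per-queue resources.
 */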
static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
			     int qp_ctx_id, int alg_type)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_qp_ctx *qp_ctx;
	struct hisi_qp *qp;
	int ret = -ENOMEM;

	qp = hisi_qm_create_qp(qm, alg_type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
	qp->req_type = 0;
	qp->qp_ctx = qp_ctx;
	qp->req_cb = sec_req_cb;
	qp_ctx->qp = qp;
	qp_ctx->ctx = ctx;

	mutex_init(&qp_ctx->req_lock);
	atomic_set(&qp_ctx->pending_reqs, 0);
	idr_init(&qp_ctx->req_idr);

	qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
	if (!qp_ctx->req_list)
		goto err_destroy_idr;

	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						     SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_in_pool)) {
		dev_err(dev, "fail to create sgl pool for input!\n");
		goto err_free_req_list;
	}

	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
						      SEC_SGL_SGE_NR);
	if (IS_ERR(qp_ctx->c_out_pool)) {
		dev_err(dev, "fail to create sgl pool for output!\n");
		goto err_free_c_in_pool;
	}

	ret = ctx->req_op->resource_alloc(ctx, qp_ctx);
	if (ret)
		goto err_free_c_out_pool;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0)
		goto err_queue_free;

	return 0;

err_queue_free:
	ctx->req_op->resource_free(ctx, qp_ctx);
err_free_c_out_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
err_free_req_list:
	kfree(qp_ctx->req_list);
err_destroy_idr:
	idr_destroy(&qp_ctx->req_idr);
	hisi_qm_release_qp(qp);

	return ret;
}

static void sec_release_qp_ctx(struct sec_ctx *ctx,
			       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);

	hisi_qm_stop_qp(qp_ctx->qp);
	ctx->req_op->resource_free(ctx, qp_ctx);

	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);

	idr_destroy(&qp_ctx->req_idr);
	kfree(qp_ctx->req_list);
	hisi_qm_release_qp(qp_ctx->qp);
}

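/*
 * Per-TFM initialisation: bind to the SEC device nearest the current CPU,
 * split the context queues evenly between encryption and decryption, and
 * allocate the DMA-coherent cipher key buffer.
 */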
static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx;
	struct sec_dev *sec;
	struct device *dev;
	struct hisi_qm *qm;
	int i, ret;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));

	sec = sec_find_device(cpu_to_node(smp_processor_id()));
	if (!sec) {
		pr_err("find no Hisilicon SEC device!\n");
		return -ENODEV;
	}
	ctx->sec = sec;
	qm = &sec->qm;
	dev = &qm->pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 0x1;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = QM_Q_DEPTH >> 0x1;
	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
			      GFP_KERNEL);
	if (!ctx->qp_ctx)
		return -ENOMEM;

	for (i = 0; i < sec->ctx_q_num; i++) {
		ret = sec_create_qp_ctx(qm, ctx, i, 0);
		if (ret)
			goto err_sec_release_qp_ctx;
	}

	c_ctx = &ctx->c_ctx;
	c_ctx->ivsize = crypto_skcipher_ivsize(tfm);
	if (c_ctx->ivsize > SEC_IV_SIZE) {
		dev_err(dev, "get error iv size!\n");
		ret = -EINVAL;
		goto err_sec_release_qp_ctx;
	}
	c_ctx->c_key = dma_alloc_coherent(dev, SEC_MAX_KEY_SIZE,
					  &c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key) {
		ret = -ENOMEM;
		goto err_sec_release_qp_ctx;
	}

	return 0;

err_sec_release_qp_ctx:
	for (i = i - 1; i >= 0; i--)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
	return ret;
}

static void sec_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int i;

	if (c_ctx->c_key) {
		dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
				  c_ctx->c_key, c_ctx->c_key_dma);
		c_ctx->c_key = NULL;
	}

	for (i = 0; i < ctx->sec->ctx_q_num; i++)
		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	kfree(ctx->qp_ctx);
}

static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
				    const u32 keylen,
				    const enum sec_cmode c_mode)
{
	switch (keylen) {
	case SEC_DES3_2KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
		break;
	case SEC_DES3_3KEY_SIZE:
		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
				       const u32 keylen,
				       const enum sec_cmode c_mode)
{
	if (c_mode == SEC_CMODE_XTS) {
		switch (keylen) {
		case SEC_XTS_MIN_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case SEC_XTS_MAX_KEY_SIZE:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: xts mode key error!\n");
			return -EINVAL;
		}
	} else {
		switch (keylen) {
		case AES_KEYSIZE_128:
			c_ctx->c_key_len = SEC_CKEY_128BIT;
			break;
		case AES_KEYSIZE_192:
			c_ctx->c_key_len = SEC_CKEY_192BIT;
			break;
		case AES_KEYSIZE_256:
			c_ctx->c_key_len = SEC_CKEY_256BIT;
			break;
		default:
			pr_err("hisi_sec2: aes key error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       const u32 keylen, const enum sec_calg c_alg,
			       const enum sec_cmode c_mode)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	int ret;

	if (c_mode == SEC_CMODE_XTS) {
		ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
			return ret;
		}
	}

	c_ctx->c_alg  = c_alg;
	c_ctx->c_mode = c_mode;

	switch (c_alg) {
	case SEC_CALG_3DES:
		ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
		break;
	case SEC_CALG_AES:
	case SEC_CALG_SM4:
		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
		break;
	default:
		return -EINVAL;
	}

	if (ret) {
		dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
		return ret;
	}

	memcpy(c_ctx->c_key, key, keylen);

	return 0;
}

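/* Stamp out one thin setkey wrapper per supported algorithm/mode pair */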
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
	u32 keylen)							\
{									\
	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
}

GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)

GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)

GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)

static int sec_skcipher_get_res(struct sec_ctx *ctx,
				struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct sec_cipher_res *c_res = qp_ctx->alg_meta_data;
	struct sec_cipher_req *c_req = &req->c_req;
	int req_id = req->req_id;

	c_req->c_ivin = c_res[req_id].c_ivin;
	c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;

	return 0;
}

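/*
 * Per-queue cipher resources: one coherent DMA allocation holds the IV-in
 * buffers for the whole queue depth, sliced into SEC_IV_SIZE chunks that
 * sec_skcipher_get_res() hands out per request id.
 */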
static int sec_skcipher_resource_alloc(struct sec_ctx *ctx,
				       struct sec_qp_ctx *qp_ctx)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_cipher_res *res;
	int i;

	res = kcalloc(QM_Q_DEPTH, sizeof(struct sec_cipher_res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
					 &res->c_ivin_dma, GFP_KERNEL);
	if (!res->c_ivin) {
		kfree(res);
		return -ENOMEM;
	}

	for (i = 1; i < QM_Q_DEPTH; i++) {
		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
	}
	qp_ctx->alg_meta_data = res;

	return 0;
}

static void sec_skcipher_resource_free(struct sec_ctx *ctx,
				       struct sec_qp_ctx *qp_ctx)
{
	struct sec_cipher_res *res = qp_ctx->alg_meta_data;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (!res)
		return;

	dma_free_coherent(dev, SEC_TOTAL_IV_SZ, res->c_ivin, res->c_ivin_dma);
	kfree(res);
}

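/*
 * Map src/dst scatterlists to hardware SGLs. In-place requests (dst == src)
 * reuse the input mapping for the output to avoid a second DMA mapping.
 */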
static int sec_skcipher_map(struct device *dev, struct sec_req *req,
			    struct scatterlist *src, struct scatterlist *dst)
{
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
						    qp_ctx->c_in_pool,
						    req->req_id,
						    &c_req->c_in_dma);
	if (IS_ERR(c_req->c_in)) {
		dev_err(dev, "fail to dma map input sgl buffers!\n");
		return PTR_ERR(c_req->c_in);
	}

	if (dst == src) {
		c_req->c_out = c_req->c_in;
		c_req->c_out_dma = c_req->c_in_dma;
	} else {
		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
							     qp_ctx->c_out_pool,
							     req->req_id,
							     &c_req->c_out_dma);
		if (IS_ERR(c_req->c_out)) {
			dev_err(dev, "fail to dma map output sgl buffers!\n");
			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
			return PTR_ERR(c_req->c_out);
		}
	}

	return 0;
}

static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;

	return sec_skcipher_map(SEC_CTX_DEV(ctx), req,
				c_req->sk_req->src, c_req->sk_req->dst);
}

static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
	struct device *dev = SEC_CTX_DEV(ctx);
	struct sec_cipher_req *c_req = &req->c_req;
	struct skcipher_request *sk_req = c_req->sk_req;

	if (sk_req->dst != sk_req->src)
		hisi_acc_sg_buf_unmap(dev, sk_req->src, c_req->c_in);

	hisi_acc_sg_buf_unmap(dev, sk_req->dst, c_req->c_out);
}

static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);
	if (ret)
		return ret;

	ctx->req_op->do_transfer(ctx, req);

	ret = ctx->req_op->bd_fill(ctx, req);
	if (ret)
		goto unmap_req_buf;

	return ret;

unmap_req_buf:
	ctx->req_op->buf_unmap(ctx, req);

	return ret;
}

static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
{
	ctx->req_op->buf_unmap(ctx, req);
}

static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_cipher_req *c_req = &req->c_req;

	c_req->c_len = sk_req->cryptlen;
	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}

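/*
 * Fill a type2 sqe(bd) for a cipher request: DMA addresses for key, IV and
 * data, then the mode/key-length, direction, scene and address-type bits at
 * the offsets defined at the top of this file.
 */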
static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	u8 scene, sa_type, da_type;
	u8 bd_type, cipher;
	u8 de = 0;

	memset(sec_sqe, 0, sizeof(struct sec_sqe));

	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);

	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
						 SEC_CMODE_OFFSET);
	sec_sqe->type2.c_alg = c_ctx->c_alg;
	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
						 SEC_CKEY_OFFSET);

	bd_type = SEC_BD_TYPE2;
	if (c_req->encrypt)
		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
	else
		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
	sec_sqe->type_cipher_auth = bd_type | cipher;

	sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
	if (c_req->c_in_dma != c_req->c_out_dma)
		de = 0x1 << SEC_DE_OFFSET;

	sec_sqe->sds_sa_type = (de | scene | sa_type);

	/* Just set DST address type */
	da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
	sec_sqe->sdm_addr_type |= da_type;

	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);

	return 0;
}

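/*
 * CBC chaining: copy the last ciphertext block (from dst when encrypting,
 * from src when decrypting) back into the request IV for the next call.
 */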
static void sec_update_iv(struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	u32 iv_size = req->ctx->c_ctx.ivsize;
	struct scatterlist *sgl;
	size_t sz;

	if (req->c_req.encrypt)
		sgl = sk_req->dst;
	else
		sgl = sk_req->src;

	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), sk_req->iv,
				iv_size, sk_req->cryptlen - iv_size);
	if (sz != iv_size)
		dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}

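/*
 * Final completion. A request that was reported as fake-busy (-EBUSY) must
 * first be completed with -EINPROGRESS, per the crypto API backlog
 * convention, before its real status is reported; the cmpxchg therefore
 * fires the extra notification only when fake_busy was actually set.
 */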
static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req)
{
	struct skcipher_request *sk_req = req->c_req.sk_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);

	/* IV is output at the end of encryption in CBC mode */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
		sec_update_iv(req);

	if (atomic_cmpxchg(&req->fake_busy, 1, 0) == 1)
		sk_req->base.complete(&sk_req->base, -EINPROGRESS);

	sk_req->base.complete(&sk_req->base, req->err_type);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;

	atomic_dec(&qp_ctx->pending_reqs);
	sec_free_req_id(req);
	sec_put_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int issue_id, ret;

	/* To load balance */
	issue_id = sec_get_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[issue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (req->req_id < 0) {
		sec_put_queue_id(ctx, req);
		return req->req_id;
	}

	if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
		atomic_set(&req->fake_busy, 1);
	else
		atomic_set(&req->fake_busy, 0);

	ret = ctx->req_op->get_res(ctx, req);
	if (ret) {
		/* sec_request_uninit() already drops pending_reqs */
		sec_request_uninit(ctx, req);
		dev_err(SEC_CTX_DEV(ctx), "get resources failed!\n");
	}

	return ret;
}

static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = sec_request_init(ctx, req);
	if (ret)
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (ret)
		goto err_uninit_req;

	/* For CBC decryption, take the next IV from the ciphertext now,
	 * before an in-place operation can overwrite it.
	 */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		sec_update_iv(req);

	ret = ctx->req_op->bd_send(ctx, req);
	if (ret != -EBUSY && ret != -EINPROGRESS) {
		dev_err(SEC_CTX_DEV(ctx), "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV that sec_update_iv() rewrote */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
		memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin,
		       ctx->c_ctx.ivsize);

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);

	return ret;
}

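/* The skcipher instantiation of the generic SEC request pipeline */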
static struct sec_req_op sec_req_ops_tbl = {
	.get_res	= sec_skcipher_get_res,
	.resource_alloc	= sec_skcipher_resource_alloc,
	.resource_free	= sec_skcipher_resource_free,
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->req_op = &sec_req_ops_tbl;

	return sec_skcipher_init(tfm);
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_exit(tfm);
}

static int sec_skcipher_param_check(struct sec_ctx *ctx,
				    struct skcipher_request *sk_req)
{
	u8 c_alg = ctx->c_ctx.c_alg;
	struct device *dev = SEC_CTX_DEV(ctx);

	if (!sk_req->src || !sk_req->dst) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	if (c_alg == SEC_CALG_3DES) {
		if (sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1)) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		if (sk_req->cryptlen & (AES_BLOCK_SIZE - 1)) {
			dev_err(dev, "skcipher aes input length error!\n");
			return -EINVAL;
		}
		return 0;
	}

	dev_err(dev, "skcipher algorithm error!\n");
	return -EINVAL;
}

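/*
 * Entry point shared by encrypt and decrypt: zero-length requests complete
 * immediately, everything else is validated and pushed into the pipeline.
 */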
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen)
		return 0;

	ret = sec_skcipher_param_check(ctx, sk_req);
	if (ret)
		return ret;

	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
},

#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
	max_key_size, blk_size, iv_size) \
	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)

static struct skcipher_alg sec_algs[] = {
	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, 0)

	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
			 SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)

	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
};

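/*
 * Registration is reference counted across SEC devices: the algorithms are
 * registered when the first device comes up and unregistered when the last
 * one goes away.
 */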
int sec_register_to_crypto(void)
{
	int ret = 0;

	/* Register the algorithms only once, for the first active device */
	mutex_lock(&sec_algs_lock);
	if (++sec_active_devs == 1)
		ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	mutex_unlock(&sec_algs_lock);

	return ret;
}

void sec_unregister_from_crypto(void)
{
	mutex_lock(&sec_algs_lock);
	if (--sec_active_devs == 0)
		crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	mutex_unlock(&sec_algs_lock);
}