drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
// SPDX-License-Identifier: GPL-2.0-or-later
 /* Algorithms supported by virtio crypto device
  *
  * Authors: Gonglei <arei.gonglei@huawei.com>
  *
  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
  */

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"


struct virtio_crypto_skcipher_ctx {
        struct virtio_crypto *vcrypto;
        struct crypto_skcipher *tfm;

        struct virtio_crypto_sym_session_info enc_sess_info;
        struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
        struct virtio_crypto_request base;

        /* Cipher or aead */
        uint32_t type;
        struct virtio_crypto_skcipher_ctx *skcipher_ctx;
        struct skcipher_request *skcipher_req;
        uint8_t *iv;
        /* Encryption? */
        bool encrypt;
};

struct virtio_crypto_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
        struct skcipher_engine_alg algo;
};

/*
 * The algs_lock protects the per-algorithm active_devs counters in
 * virtio_crypto_algs[] below and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct skcipher_request *req,
        int err);

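/*
 * Completion callback run for data-queue requests. The device status is
 * translated to a Linux errno before the skcipher request is finalized;
 * only VIRTIO_CRYPTO_SYM_OP_CIPHER requests are handled here.
 */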
static void virtio_crypto_dataq_sym_callback
                (struct virtio_crypto_request *vc_req, int len)
{
        struct virtio_crypto_sym_request *vc_sym_req =
                container_of(vc_req, struct virtio_crypto_sym_request, base);
        struct skcipher_request *ablk_req;
        int error;

        /* Finish the encrypt or decrypt process */
        if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
                switch (vc_req->status) {
                case VIRTIO_CRYPTO_OK:
                        error = 0;
                        break;
                case VIRTIO_CRYPTO_INVSESS:
                case VIRTIO_CRYPTO_ERR:
                        error = -EINVAL;
                        break;
                case VIRTIO_CRYPTO_BADMSG:
                        error = -EBADMSG;
                        break;
                default:
                        error = -EIO;
                        break;
                }
                ablk_req = vc_sym_req->skcipher_req;
                virtio_crypto_skcipher_finalize_req(vc_sym_req,
                                                        ablk_req, error);
        }
}

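/* Sum the byte lengths of all entries in a scatterlist. */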
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
        u64 total = 0;

        for (total = 0; sg; sg = sg_next(sg))
                total += sg->length;

        return total;
}

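/*
 * Only AES-CBC is wired up at the moment, so a key is valid exactly when
 * its length matches one of the three AES key sizes.
 */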
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
        switch (key_len) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_192:
        case AES_KEYSIZE_256:
                *alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

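/*
 * Create a cipher session over the control virtqueue. The request uses
 * three scatterlist entries: the control header and the key are
 * device-readable, the session_input (status + session id) is
 * device-writable.
 */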
static int virtio_crypto_alg_skcipher_init_session(
                struct virtio_crypto_skcipher_ctx *ctx,
                uint32_t alg, const uint8_t *key,
                unsigned int keylen,
                int encrypt)
{
        struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
        int err;
        unsigned int num_out = 0, num_in = 0;
        struct virtio_crypto_op_ctrl_req *ctrl;
        struct virtio_crypto_session_input *input;
        struct virtio_crypto_sym_create_session_req *sym_create_session;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;

        /*
         * To avoid doing DMA from the stack, use a dynamically
         * allocated buffer for the key.
         */
        uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

        if (!cipher_key)
                return -ENOMEM;

        vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
        if (!vc_ctrl_req) {
                err = -ENOMEM;
                goto out;
        }

        /* Fill in the control header */
        ctrl = &vc_ctrl_req->ctrl;
        ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
        ctrl->header.algo = cpu_to_le32(alg);
        /* Set the default dataqueue id to 0 */
        ctrl->header.queue_id = 0;

        input = &vc_ctrl_req->input;
        input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
        /* Fill in the cipher's parameters */
        sym_create_session = &ctrl->u.sym_create_session;
        sym_create_session->op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        sym_create_session->u.cipher.para.algo = ctrl->header.algo;
        sym_create_session->u.cipher.para.keylen = cpu_to_le32(keylen);
        sym_create_session->u.cipher.para.op = cpu_to_le32(op);

        sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr;

        /* Set key */
        sg_init_one(&key_sg, cipher_key, keylen);
        sgs[num_out++] = &key_sg;

        /* Return status and session id back */
        sg_init_one(&inhdr, input, sizeof(*input));
        sgs[num_out + num_in++] = &inhdr;

        err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;

        if (le32_to_cpu(input->status) != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Create session failed status: %u\n",
                        le32_to_cpu(input->status));
                err = -EINVAL;
                goto out;
        }

        if (encrypt)
                ctx->enc_sess_info.session_id = le64_to_cpu(input->session_id);
        else
                ctx->dec_sess_info.session_id = le64_to_cpu(input->session_id);

        err = 0;
out:
        kfree(vc_ctrl_req);
        kfree_sensitive(cipher_key);
        return err;
}

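/*
 * Tear down one session (encrypt or decrypt) over the control virtqueue.
 * The layout mirrors session creation: a device-readable control header
 * followed by a one-byte device-writable status.
 */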
static int virtio_crypto_alg_skcipher_close_session(
                struct virtio_crypto_skcipher_ctx *ctx,
                int encrypt)
{
        struct scatterlist outhdr, status_sg, *sgs[2];
        struct virtio_crypto_destroy_session_req *destroy_session;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        int err;
        unsigned int num_out = 0, num_in = 0;
        struct virtio_crypto_op_ctrl_req *ctrl;
        struct virtio_crypto_inhdr *ctrl_status;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;

        vc_ctrl_req = kzalloc(sizeof(*vc_ctrl_req), GFP_KERNEL);
        if (!vc_ctrl_req)
                return -ENOMEM;

        ctrl_status = &vc_ctrl_req->ctrl_status;
        ctrl_status->status = VIRTIO_CRYPTO_ERR;
        /* Fill in the control header */
        ctrl = &vc_ctrl_req->ctrl;
        ctrl->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
        /* Set the default virtqueue id to 0 */
        ctrl->header.queue_id = 0;

        destroy_session = &ctrl->u.destroy_session;

        if (encrypt)
                destroy_session->session_id = cpu_to_le64(ctx->enc_sess_info.session_id);
        else
                destroy_session->session_id = cpu_to_le64(ctx->dec_sess_info.session_id);

        sg_init_one(&outhdr, ctrl, sizeof(*ctrl));
        sgs[num_out++] = &outhdr;

        /* Return the status back */
        sg_init_one(&status_sg, &ctrl_status->status, sizeof(ctrl_status->status));
        sgs[num_out + num_in++] = &status_sg;

        err = virtio_crypto_ctrl_vq_request(vcrypto, sgs, num_out, num_in, vc_ctrl_req);
        if (err < 0)
                goto out;

        if (ctrl_status->status != VIRTIO_CRYPTO_OK) {
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                        ctrl_status->status, destroy_session->session_id);

                err = -EINVAL;
                goto out;
        }

        err = 0;
out:
        kfree(vc_ctrl_req);
        return err;
}

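/*
 * The device keeps separate sessions per direction, so setkey creates
 * both an encryption and a decryption session; if the second one fails,
 * the first is closed again.
 */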
static int virtio_crypto_alg_skcipher_init_sessions(
                struct virtio_crypto_skcipher_ctx *ctx,
                const uint8_t *key, unsigned int keylen)
{
        uint32_t alg;
        int ret;
        struct virtio_crypto *vcrypto = ctx->vcrypto;

        if (keylen > vcrypto->max_cipher_key_len) {
                pr_err("virtio_crypto: the key is too long\n");
                return -EINVAL;
        }

        if (virtio_crypto_alg_validate_key(keylen, &alg))
                return -EINVAL;

        /* Create encryption session */
        ret = virtio_crypto_alg_skcipher_init_session(ctx,
                        alg, key, keylen, 1);
        if (ret)
                return ret;
        /* Create decryption session */
        ret = virtio_crypto_alg_skcipher_init_session(ctx,
                        alg, key, keylen, 0);
        if (ret) {
                virtio_crypto_alg_skcipher_close_session(ctx, 1);
                return ret;
        }
        return 0;
}

/* Note: the functions below implement the kernel crypto (skcipher) API. */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
                                         const uint8_t *key,
                                         unsigned int keylen)
{
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        uint32_t alg;
        int ret;

        ret = virtio_crypto_alg_validate_key(keylen, &alg);
        if (ret)
                return ret;

        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
                                      virtcrypto_get_dev_node(node,
                                      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
                if (!vcrypto) {
                        pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }

                ctx->vcrypto = vcrypto;
        } else {
                /* Rekeying: close the previously created sessions first */
                virtio_crypto_alg_skcipher_close_session(ctx, 1);
                virtio_crypto_alg_skcipher_close_session(ctx, 0);
        }

        ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
        if (ret) {
                virtcrypto_dev_put(ctx->vcrypto);
                ctx->vcrypto = NULL;

                return ret;
        }

        return 0;
}

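/*
 * Build and submit one cipher request on a data virtqueue. The
 * descriptor layout is: op header | IV | source data (device-readable),
 * then destination data | status byte (device-writable).
 */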
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
                struct skcipher_request *req,
                struct data_queue *data_vq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        struct virtio_crypto_op_data_req *req_data;
        int src_nents, dst_nents;
        int err;
        unsigned long flags;
        struct scatterlist outhdr, iv_sg, status_sg, **sgs;
        u64 dst_len;
        unsigned int num_out = 0, num_in = 0;
        int sg_total;
        uint8_t *iv;
        struct scatterlist *sg;

        src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (src_nents < 0) {
                pr_err("Invalid number of src SG.\n");
                return src_nents;
        }

        dst_nents = sg_nents(req->dst);

        pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
                        src_nents, dst_nents);

        /* Why 3?  outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
        sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;

        req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
                return -ENOMEM;
        }

        vc_req->req_data = req_data;
        vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
        /* Head of operation */
        if (vc_sym_req->encrypt) {
                req_data->header.session_id =
                        cpu_to_le64(ctx->enc_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
        } else {
                req_data->header.session_id =
                        cpu_to_le64(ctx->dec_sess_info.session_id);
                req_data->header.opcode =
                        cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
        }
        req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
        req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
        req_data->u.sym_req.u.cipher.para.src_data_len =
                        cpu_to_le32(req->cryptlen);

        dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
        if (unlikely(dst_len > U32_MAX)) {
                pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
                err = -EINVAL;
                goto free;
        }

        dst_len = min_t(unsigned int, req->cryptlen, dst_len);
        pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
                        req->cryptlen, dst_len);

        if (unlikely(req->cryptlen + dst_len + ivsize +
                sizeof(vc_req->status) > vcrypto->max_size)) {
                pr_err("virtio_crypto: The length is too big\n");
                err = -EINVAL;
                goto free;
        }

        req_data->u.sym_req.u.cipher.para.dst_data_len =
                        cpu_to_le32((uint32_t)dst_len);

        /* Outhdr */
        sg_init_one(&outhdr, req_data, sizeof(*req_data));
        sgs[num_out++] = &outhdr;

        /* IV */

        /*
         * To avoid doing DMA from the stack, use a dynamically
         * allocated buffer for the IV.
         */
        iv = kzalloc_node(ivsize, GFP_ATOMIC,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!iv) {
                err = -ENOMEM;
                goto free;
        }
        memcpy(iv, req->iv, ivsize);
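        /*
         * For CBC decryption the IV of a chained follow-up request is
         * the last ciphertext block, so stash it in req->iv now: an
         * in-place operation would overwrite the source buffer before
         * the request completes.
         */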
        if (!vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->iv, req->src,
                                         req->cryptlen - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);

        sg_init_one(&iv_sg, iv, ivsize);
        sgs[num_out++] = &iv_sg;
        vc_sym_req->iv = iv;

        /* Source data */
        for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
                sgs[num_out++] = sg;

        /* Destination data */
        for (sg = req->dst; sg; sg = sg_next(sg))
                sgs[num_out + num_in++] = sg;

        /* Status */
        sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
        sgs[num_out + num_in++] = &status_sg;

        vc_req->sgs = sgs;

        spin_lock_irqsave(&data_vq->lock, flags);
        err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
                                num_in, vc_req, GFP_ATOMIC);
        virtqueue_kick(data_vq->vq);
        spin_unlock_irqrestore(&data_vq->lock, flags);
        if (unlikely(err < 0))
                goto free_iv;

        return 0;

free_iv:
        kfree_sensitive(iv);
free:
        kfree_sensitive(req_data);
        kfree(sgs);
        return err;
}

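/*
 * skcipher encrypt/decrypt entry points. Zero-length requests complete
 * immediately; since only CBC mode is supported, the length must be a
 * multiple of the AES block size. The real work is deferred to the
 * crypto engine, which invokes virtio_crypto_skcipher_crypt_req() below.
 */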
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                skcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->cryptlen)
                return 0;
        if (req->cryptlen % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->skcipher_ctx = ctx;
        vc_sym_req->skcipher_req = req;
        vc_sym_req->encrypt = true;

        return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
        struct virtio_crypto_sym_request *vc_sym_req =
                                skcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct virtio_crypto *vcrypto = ctx->vcrypto;
        /* Use the first data virtqueue as default */
        struct data_queue *data_vq = &vcrypto->data_vq[0];

        if (!req->cryptlen)
                return 0;
        if (req->cryptlen % AES_BLOCK_SIZE)
                return -EINVAL;

        vc_req->dataq = data_vq;
        vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
        vc_sym_req->skcipher_ctx = ctx;
        vc_sym_req->skcipher_req = req;
        vc_sym_req->encrypt = false;

        return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}

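/*
 * tfm lifecycle: init reserves per-request context space and remembers
 * the tfm; exit closes any open sessions and drops the device reference.
 */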
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
        ctx->tfm = tfm;

        return 0;
}

static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
        struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        if (!ctx->vcrypto)
                return;

        virtio_crypto_alg_skcipher_close_session(ctx, 1);
        virtio_crypto_alg_skcipher_close_session(ctx, 0);
        virtcrypto_dev_put(ctx->vcrypto);
        ctx->vcrypto = NULL;
}

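/* crypto engine do_one_request() hook: submit one request to the device. */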
int virtio_crypto_skcipher_crypt_req(
        struct crypto_engine *engine, void *vreq)
{
        struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
        struct virtio_crypto_sym_request *vc_sym_req =
                                skcipher_request_ctx(req);
        struct virtio_crypto_request *vc_req = &vc_sym_req->base;
        struct data_queue *data_vq = vc_req->dataq;
        int ret;

        ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
        if (ret < 0)
                return ret;

        virtqueue_kick(data_vq->vq);

        return 0;
}

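/*
 * Completion path: for CBC encryption the IV of a chained follow-up
 * request is the last ciphertext block, copied out of req->dst here.
 * The bounce IV buffer is freed with kfree_sensitive() since it holds
 * key-dependent material.
 */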
static void virtio_crypto_skcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct skcipher_request *req,
        int err)
{
        if (vc_sym_req->encrypt)
                scatterwalk_map_and_copy(req->iv, req->dst,
                                         req->cryptlen - AES_BLOCK_SIZE,
                                         AES_BLOCK_SIZE, 0);
        kfree_sensitive(vc_sym_req->iv);
        virtcrypto_clear_request(&vc_sym_req->base);

        crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
                                           req, err);
}

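/*
 * Table of algorithms this driver can expose; currently only cbc(aes).
 * Note: a cra_priority of 150 typically ranks above the generic C
 * implementation but below hardware-accelerated ones such as AES-NI,
 * so this driver is usually selected only when faster options are
 * absent in the guest.
 */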
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
        .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
        .algo.base = {
                .base.cra_name          = "cbc(aes)",
                .base.cra_driver_name   = "virtio_crypto_aes_cbc",
                .base.cra_priority      = 150,
                .base.cra_flags         = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_ALLOCATES_MEMORY,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct virtio_crypto_skcipher_ctx),
                .base.cra_module        = THIS_MODULE,
                .init                   = virtio_crypto_skcipher_init,
                .exit                   = virtio_crypto_skcipher_exit,
                .setkey                 = virtio_crypto_skcipher_setkey,
                .decrypt                = virtio_crypto_skcipher_decrypt,
                .encrypt                = virtio_crypto_skcipher_encrypt,
                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
        },
        .algo.op = {
                .do_one_request = virtio_crypto_skcipher_crypt_req,
        },
} };

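/*
 * Register every algorithm the given device supports. active_devs
 * refcounts registration across devices: the algorithm is registered
 * with the crypto API only when the first capable device appears.
 */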
int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
{
        int ret = 0;
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 0) {
                        ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo);
                        if (ret)
                                goto unlock;
                }

                virtio_crypto_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
                         virtio_crypto_algs[i].algo.base.base.cra_name);
        }

unlock:
        mutex_unlock(&algs_lock);
        return ret;
}

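/* Mirror of the above: unregister when the last capable device goes away. */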
void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
        int i = 0;

        mutex_lock(&algs_lock);

        for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

                uint32_t service = virtio_crypto_algs[i].service;
                uint32_t algonum = virtio_crypto_algs[i].algonum;

                if (virtio_crypto_algs[i].active_devs == 0 ||
                    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
                        continue;

                if (virtio_crypto_algs[i].active_devs == 1)
                        crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo);

                virtio_crypto_algs[i].active_devs--;
        }

        mutex_unlock(&algs_lock);
}