2 * algif_akcipher: User-space interface for asymmetric cipher algorithms
4 * Copyright (C) 2018 - 2020, Stephan Mueller <smueller@chronox.de>
6 * This file provides the user-space API for asymmetric ciphers.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * The following concept of the memory management is used:
15 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
16 * filled by user space with the data submitted via sendpage/sendmsg. Filling
17 * up the TX SGL does not cause a crypto operation -- the data will only be
18 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
19 * provide a buffer which is tracked with the RX SGL.
21 * During the processing of the recvmsg operation, the cipher request is
22 * allocated and prepared. As part of the recvmsg operation, the processed
23 * TX buffers are extracted from the TX SGL into a separate SGL.
25 * After the completion of the crypto operation, the RX SGL and the cipher
26 * request are released. The extracted TX SGL parts are released together with
30 #include <crypto/akcipher.h>
31 #include <crypto/if_alg.h>
32 #include <crypto/scatterwalk.h>
33 #include <linux/init.h>
34 #include <linux/list.h>
35 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/net.h>
42 struct crypto_akcipher *akcipher;
/*
 * akcipher_sendmsg() - queue user-supplied input data into the TX SGL.
 *
 * Thin wrapper around af_alg_sendmsg(): no crypto operation is triggered
 * here, the data is only tracked until recvmsg() runs the cipher (see the
 * file header).  The trailing 0 is presumably the IV size, which an
 * akcipher does not have — confirm against af_alg_sendmsg().
 * NOTE(review): the size parameter line and braces are elided in this
 * excerpt.
 */
46 static int akcipher_sendmsg(struct socket *sock, struct msghdr *msg,
49 return af_alg_sendmsg(sock, msg, size, 0);
/*
 * akcipher_cipher_op() - run the akcipher primitive selected for this
 * socket on the request embedded in @areq.
 *
 * One of encrypt/decrypt/sign/verify is invoked; the selection logic
 * (elided in this excerpt) presumably switches on an operation type
 * recorded in @ctx — TODO confirm against the full source.
 */
52 static inline int akcipher_cipher_op(struct af_alg_ctx *ctx,
53 struct af_alg_async_req *areq)
57 return crypto_akcipher_encrypt(&areq->cra_u.akcipher_req);
59 return crypto_akcipher_decrypt(&areq->cra_u.akcipher_req);
61 return crypto_akcipher_sign(&areq->cra_u.akcipher_req);
63 return crypto_akcipher_verify(&areq->cra_u.akcipher_req);
/*
 * _akcipher_recvmsg() - perform one akcipher operation for a recvmsg call.
 *
 * Flow: wait for TX data, allocate a per-request af_alg_async_req (with a
 * trailing akcipher_request sized by crypto_akcipher_reqsize()), map the
 * caller's receive buffers into the RX SGL, detach the consumed TX entries
 * into a private SGL, then run the cipher — asynchronously when invoked
 * via AIO, otherwise synchronously through crypto_wait_req().
 *
 * Returns the generated output size (dst_len, or len as fallback) on
 * success, negative errno on failure.
 * NOTE(review): several lines are elided in this excerpt (declarations of
 * err/maxsize/len/used, error-path labels, cleanup on the AIO path) — do
 * not treat this view as complete.
 */
69 static int _akcipher_recvmsg(struct socket *sock, struct msghdr *msg,
70 size_t ignored, int flags)
72 struct sock *sk = sock->sk;
73 struct alg_sock *ask = alg_sk(sk);
74 struct sock *psk = ask->parent;
75 struct alg_sock *pask = alg_sk(psk);
76 struct af_alg_ctx *ctx = ask->private;
/* The tfm lives on the parent (bound) socket, not the operation socket. */
77 struct akcipher_tfm *akc = pask->private;
78 struct crypto_akcipher *tfm = akc->akcipher;
79 struct af_alg_async_req *areq;
/* Wait until user space supplied input data via sendmsg/sendpage. */
86 err = af_alg_wait_for_data(sk, flags, 0);
/* Worst-case output size of one akcipher operation on this tfm. */
91 maxsize = crypto_akcipher_maxsize(tfm);
95 /* Allocate cipher request for current operation. */
96 areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
97 crypto_akcipher_reqsize(tfm));
101 /* convert iovecs of output buffers into RX SGL */
102 err = af_alg_get_rsgl(sk, msg, flags, areq, maxsize, &len);
106 /* ensure output buffer is sufficiently large */
113 * Create a per request TX SGL for this request which tracks the
114 * SG entries from the global TX SGL.
/* Guarantee at least one entry so the sg_init_table() below is valid. */
117 areq->tsgl_entries = af_alg_count_tsgl(sk, used, 0);
118 if (!areq->tsgl_entries)
119 areq->tsgl_entries = 1;
120 areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
126 sg_init_table(areq->tsgl, areq->tsgl_entries);
/* Move the consumed TX entries out of the global list into areq->tsgl. */
127 af_alg_pull_tsgl(sk, used, areq->tsgl, 0);
129 /* Initialize the crypto operation */
130 akcipher_request_set_tfm(&areq->cra_u.akcipher_req, tfm);
131 akcipher_request_set_crypt(&areq->cra_u.akcipher_req, areq->tsgl,
132 areq->first_rsgl.sgl.sg, used, len);
/* AIO path: completion is delivered through af_alg_async_cb(). */
134 if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
137 areq->iocb = msg->msg_iocb;
139 /* Remember output size that will be generated. */
140 areq->outlen = areq->cra_u.akcipher_req.dst_len ?
141 areq->cra_u.akcipher_req.dst_len : len;
143 akcipher_request_set_callback(&areq->cra_u.akcipher_req,
144 CRYPTO_TFM_REQ_MAY_SLEEP,
145 af_alg_async_cb, areq);
146 err = akcipher_cipher_op(ctx, areq);
148 /* AIO operation in progress */
149 if (err == -EINPROGRESS || err == -EBUSY)
154 /* Synchronous operation */
155 akcipher_request_set_callback(&areq->cra_u.akcipher_req,
156 CRYPTO_TFM_REQ_MAY_SLEEP |
157 CRYPTO_TFM_REQ_MAY_BACKLOG,
160 err = crypto_wait_req(akcipher_cipher_op(ctx, areq),
/* Common exit: frees RX SGL, request and the detached TX entries. */
165 af_alg_free_resources(areq);
167 return err ? err : areq->cra_u.akcipher_req.dst_len;
/*
 * akcipher_recvmsg() - recvmsg entry point; loops over the caller's
 * buffer performing one akcipher operation per crypto_akcipher_maxsize()
 * sized window, advancing the iov iterator past each window's unused
 * remainder.
 * NOTE(review): declarations of err/ret, loop-exit bookkeeping and the
 * final return value are elided in this excerpt.
 */
170 static int akcipher_recvmsg(struct socket *sock, struct msghdr *msg,
171 size_t ignored, int flags)
173 struct sock *sk = sock->sk;
174 struct alg_sock *ask = alg_sk(sk);
175 struct sock *psk = ask->parent;
176 struct alg_sock *pask = alg_sk(psk);
/* tfm is fetched from the parent socket for the maxsize query below. */
177 struct akcipher_tfm *akc = pask->private;
178 struct crypto_akcipher *tfm = akc->akcipher;
184 while (msg_data_left(msg)) {
185 err = _akcipher_recvmsg(sock, msg, ignored, flags);
188 * This error covers -EIOCBQUEUED which implies that we can
189 * only handle one AIO request. If the caller wants to have
190 * multiple AIO requests in parallel, he must make multiple
191 * separate AIO calls.
/* -EBADMSG presumably signals a verification failure — confirm. */
194 if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
202 * The caller must provide crypto_akcipher_maxsize per request.
203 * If he provides more, we conclude that multiple akcipher
204 * operations are requested.
206 iov_iter_advance(&msg->msg_iter,
207 crypto_akcipher_maxsize(tfm) - err);
/* Writers blocked on TX space may proceed again. */
211 af_alg_wmem_wakeup(sk);
/*
 * proto_ops for a keyed akcipher socket: only sendmsg/sendpage/recvmsg
 * and release are functional; all other socket operations are stubbed
 * out with the sock_no_*() helpers.
 */
216 static struct proto_ops algif_akcipher_ops = {
219 .connect = sock_no_connect,
220 .socketpair = sock_no_socketpair,
221 .getname = sock_no_getname,
222 .ioctl = sock_no_ioctl,
223 .listen = sock_no_listen,
224 .shutdown = sock_no_shutdown,
225 .mmap = sock_no_mmap,
226 .bind = sock_no_bind,
227 .accept = sock_no_accept,
229 .release = af_alg_release,
230 .sendmsg = akcipher_sendmsg,
231 .sendpage = af_alg_sendpage,
232 .recvmsg = akcipher_recvmsg,
/*
 * akcipher_check_key() - refuse cipher operations until a key was set
 * on the parent socket.
 *
 * Fast path: if the nokey refcount is already zero, the key was checked
 * before and nothing needs to be done.  Otherwise the parent socket is
 * locked (SINGLE_DEPTH_NESTING, as the child lock may be held) and, once
 * a key is present, the nokey refcounts on parent and child are
 * dropped/cleared so future calls take the fast path.
 * NOTE(review): the has_key test, error return and unlock are elided in
 * this excerpt.
 */
236 static int akcipher_check_key(struct socket *sock)
239 struct alg_sock *pask;
240 struct akcipher_tfm *tfm;
241 struct sock *sk = sock->sk;
242 struct alg_sock *ask = alg_sk(sk);
246 if (!atomic_read(&ask->nokey_refcnt))
250 pask = alg_sk(ask->parent);
253 lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
259 atomic_dec(&pask->nokey_refcnt);
260 atomic_set(&ask->nokey_refcnt, 0);
/*
 * akcipher_sendmsg_nokey() - sendmsg gate used before a key is set;
 * defers to akcipher_sendmsg() only once akcipher_check_key() passes.
 * NOTE(review): parameter list tail and the error return are elided.
 */
272 static int akcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
277 err = akcipher_check_key(sock);
281 return akcipher_sendmsg(sock, msg, size);
/*
 * akcipher_sendpage_nokey() - sendpage gate used before a key is set;
 * defers to af_alg_sendpage() only once akcipher_check_key() passes.
 * NOTE(review): the error return on a failed key check is elided.
 */
284 static ssize_t akcipher_sendpage_nokey(struct socket *sock, struct page *page,
285 int offset, size_t size, int flags)
289 err = akcipher_check_key(sock);
293 return af_alg_sendpage(sock, page, offset, size, flags);
/*
 * akcipher_recvmsg_nokey() - recvmsg gate used before a key is set;
 * defers to akcipher_recvmsg() only once akcipher_check_key() passes.
 * NOTE(review): the error return on a failed key check is elided.
 */
296 static int akcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
297 size_t ignored, int flags)
301 err = akcipher_check_key(sock);
305 return akcipher_recvmsg(sock, msg, ignored, flags);
/*
 * proto_ops installed while no key is set: identical to
 * algif_akcipher_ops except that the data-path callbacks go through the
 * *_nokey wrappers, which enforce akcipher_check_key() first.
 */
308 static struct proto_ops algif_akcipher_ops_nokey = {
311 .connect = sock_no_connect,
312 .socketpair = sock_no_socketpair,
313 .getname = sock_no_getname,
314 .ioctl = sock_no_ioctl,
315 .listen = sock_no_listen,
316 .shutdown = sock_no_shutdown,
317 .mmap = sock_no_mmap,
318 .bind = sock_no_bind,
319 .accept = sock_no_accept,
321 .release = af_alg_release,
322 .sendmsg = akcipher_sendmsg_nokey,
323 .sendpage = akcipher_sendpage_nokey,
324 .recvmsg = akcipher_recvmsg_nokey,
/*
 * akcipher_bind() - allocate the tfm container when user space binds the
 * AF_ALG socket to an akcipher algorithm @name.
 *
 * Returns the akcipher_tfm wrapper (has_key starts false) or an ERR_PTR.
 * NOTE(review): the !tfm check and the kfree(tfm) on the
 * crypto_alloc_akcipher() failure path are elided — confirm the error
 * path frees @tfm.
 */
328 static void *akcipher_bind(const char *name, u32 type, u32 mask)
330 struct akcipher_tfm *tfm;
331 struct crypto_akcipher *akcipher;
333 tfm = kmalloc(sizeof(*tfm), GFP_KERNEL);
335 return ERR_PTR(-ENOMEM);
337 akcipher = crypto_alloc_akcipher(name, type, mask);
338 if (IS_ERR(akcipher)) {
340 return ERR_CAST(akcipher);
343 tfm->akcipher = akcipher;
344 tfm->has_key = false;
/*
 * akcipher_release() - af_alg_type release callback; frees the crypto
 * transform allocated by akcipher_bind().
 * NOTE(review): the kfree() of the akcipher_tfm wrapper itself is elided
 * in this excerpt — confirm it is present, else @private leaks.
 */
349 static void akcipher_release(void *private)
351 struct akcipher_tfm *tfm = private;
352 struct crypto_akcipher *akcipher = tfm->akcipher;
354 crypto_free_akcipher(akcipher);
/*
 * akcipher_setprivkey() - setkey callback: install a private key on the
 * transform, then query crypto_akcipher_maxsize() for the operation's
 * maximum output size.  Presumably tfm->has_key is updated from the
 * result (elided here) — confirm against the full source.
 */
358 static int akcipher_setprivkey(void *private, const u8 *key,
361 struct akcipher_tfm *tfm = private;
362 struct crypto_akcipher *akcipher = tfm->akcipher;
365 err = crypto_akcipher_set_priv_key(akcipher, key, keylen);
368 /* Return the maximum size of the akcipher operation. */
370 err = crypto_akcipher_maxsize(akcipher);
/*
 * akcipher_setpubkey() - setpubkey callback: install a public key on the
 * transform, then query crypto_akcipher_maxsize() for the operation's
 * maximum output size.  Mirrors akcipher_setprivkey(); presumably
 * tfm->has_key is updated from the result (elided here) — confirm.
 */
375 static int akcipher_setpubkey(void *private, const u8 *key, unsigned int keylen)
377 struct akcipher_tfm *tfm = private;
378 struct crypto_akcipher *akcipher = tfm->akcipher;
381 err = crypto_akcipher_set_pub_key(akcipher, key, keylen);
384 /* Return the maximum size of the akcipher operation. */
386 err = crypto_akcipher_maxsize(akcipher);
/*
 * akcipher_sock_destruct() - sk_destruct hook for the operation socket:
 * drop any TX data still queued in the SGL, free the per-socket context
 * and release the reference on the parent (bound) socket.
 */
391 static void akcipher_sock_destruct(struct sock *sk)
393 struct alg_sock *ask = alg_sk(sk);
394 struct af_alg_ctx *ctx = ask->private;
/* NULL target: discard (not copy out) the remaining ctx->used bytes. */
396 af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
397 sock_kfree_s(sk, ctx, ctx->len);
398 af_alg_release_parent(sk);
/*
 * akcipher_accept_parent_nokey() - set up the per-operation-socket
 * context when accept() creates a child socket: allocate the af_alg_ctx,
 * initialize the TX SGL list, refcount and completion wait object, and
 * install the destructor.
 * NOTE(review): the ENOMEM return and remaining ctx field initialization
 * (used, more, merge, ...) are elided in this excerpt.
 */
401 static int akcipher_accept_parent_nokey(void *private, struct sock *sk)
403 struct af_alg_ctx *ctx;
404 struct alg_sock *ask = alg_sk(sk);
405 unsigned int len = sizeof(*ctx);
407 ctx = sock_kmalloc(sk, len, GFP_KERNEL);
411 INIT_LIST_HEAD(&ctx->tsgl_list);
414 atomic_set(&ctx->rcvused, 0);
418 crypto_init_wait(&ctx->wait);
422 sk->sk_destruct = akcipher_sock_destruct;
/*
 * akcipher_accept_parent() - keyed accept path; presumably rejects the
 * accept when tfm->has_key is false (check elided here — confirm), then
 * shares the common setup with the nokey variant.
 */
427 static int akcipher_accept_parent(void *private, struct sock *sk)
429 struct akcipher_tfm *tfm = private;
434 return akcipher_accept_parent_nokey(private, sk);
/*
 * af_alg_type descriptor registered with the AF_ALG core; ties the
 * bind/release/setkey callbacks and both proto_ops tables (keyed and
 * nokey) to the "akcipher" socket type.
 */
437 static const struct af_alg_type algif_type_akcipher = {
438 .bind = akcipher_bind,
439 .release = akcipher_release,
440 .setkey = akcipher_setprivkey,
441 .setpubkey = akcipher_setpubkey,
443 .accept = akcipher_accept_parent,
444 .accept_nokey = akcipher_accept_parent_nokey,
445 .ops = &algif_akcipher_ops,
446 .ops_nokey = &algif_akcipher_ops_nokey,
/* Module init: register the "akcipher" type with the AF_ALG core. */
451 static int __init algif_akcipher_init(void)
453 return af_alg_register_type(&algif_type_akcipher);
/*
 * Module exit: unregister the type.  The handling of a nonzero @err
 * (typically a WARN/BUG in algif modules) is elided in this excerpt.
 */
456 static void __exit algif_akcipher_exit(void)
458 int err = af_alg_unregister_type(&algif_type_akcipher);
/* Module registration and metadata. */
463 module_init(algif_akcipher_init);
464 module_exit(algif_akcipher_exit);
465 MODULE_LICENSE("GPL");
466 MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
467 MODULE_DESCRIPTION("Asymmetric kernel crypto API user space interface");