/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <crypto/aead.h>

#include <net/tls.h>
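
/* Trim the tail of @sg down to @target_size bytes: drop whole trailing
 * entries, shorten the last surviving entry, and uncharge the freed
 * memory from the socket's send-buffer accounting.
 */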
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}
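
/* Trim both scatterlists of the open record back to @target_size payload
 * bytes; the encrypted list additionally keeps room for the record header
 * and authentication tag (tls_ctx->overhead_size).
 */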
static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}
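
/* Grow the ciphertext scatterlist to @len bytes of page memory charged
 * against the socket's send buffer.
 */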
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	return rc;
}
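
/* Grow the plaintext scatterlist to @len bytes; coalescing starts past
 * the fragments already committed to the pending open record.
 */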
static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	return rc;
}
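
/* Drop the page references held by @sg and return the charged memory
 * to the socket.
 */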
static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}

	*sg_num_elem = 0;
	*sg_size = 0;
}
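
/* Free the pages backing both the encrypted and plaintext scatterlists. */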
static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}
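
/* Encrypt the pending record with the socket's AEAD transform. The first
 * ciphertext fragment is temporarily advanced past the TLS header, which
 * is written separately by tls_fill_prepend(), and the call waits
 * synchronously for async crypto completion.
 */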
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context *ctx, size_t data_len,
			     gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(ctx->aead_send);
	struct aead_request *aead_req;
	int rc;

	aead_req = kzalloc(req_size, flags);
	if (!aead_req)
		return -ENOMEM;

	ctx->sg_encrypted_data[0].offset += tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->prepend_size;

	kfree(aead_req);
	return rc;
}
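
/* Seal the open record: build the AAD and TLS header, encrypt the
 * payload, hand the ciphertext scatterlist to the transport via
 * tls_push_sg(), and advance the record sequence number.
 */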
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int rc;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->rec_seq, tls_ctx->rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, ctx->sg_plaintext_size,
			       sk->sk_allocation);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		return rc;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk);

	tls_advance_record_sn(sk, tls_ctx);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}
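
/* Pin the user pages referenced by @from and map them directly into the
 * plaintext scatterlist, avoiding a copy. On failure the caller reverts
 * the iterator and falls back to memcopy_from_iter().
 */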
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = ctx->sg_plaintext_size;
	int num_elem = ctx->sg_plaintext_num_elem;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = ARRAY_SIZE(ctx->sg_plaintext_data) - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&ctx->sg_plaintext_data[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&ctx->sg_plaintext_data[num_elem]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

out:
	ctx->sg_plaintext_size = size;
	ctx->sg_plaintext_num_elem = num_elem;
	return rc;
}
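
/* Copy @bytes from the message iterator into the kernel pages already
 * allocated to the plaintext scatterlist, starting at the first fragment
 * not yet part of the pending open record.
 */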
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}

		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}
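
/* sendmsg() for a TLS_TX socket using software crypto. Data is batched
 * into records of at most TLS_MAX_PAYLOAD_SIZE; a record is encrypted
 * and pushed once it is full, or at end-of-message when MSG_MORE is not
 * set, zerocopying user pages where possible.
 */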
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	if (tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo))
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}

		if (full_record || eor) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
						 try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (!ret)
				continue;
			if (ret == -EAGAIN)
				goto send_end;

			copied -= try_to_copy;
fallback_to_reg_send:
			iov_iter_revert(&msg->msg_iter,
					ctx->sg_plaintext_size - orig_size);
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}
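
/* sendpage() entry point: splice @page into the plaintext scatterlist
 * without copying. Splice provides no MSG_EOR, so only MSG_MORE and
 * MSG_SENDPAGE_NOTLAST indicate whether more data will follow.
 */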
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);
	int ret = 0;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (tls_complete_pending_work(sk, tls_ctx, flags, &timeo))
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}
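
/* Release TX state at socket teardown: the AEAD transform, any pages
 * still held by the scatterlists, and the contexts themselves.
 */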
void tls_sw_free_tx_resources(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context *ctx = tls_sw_ctx(tls_ctx);

	if (ctx->aead_send)
		crypto_free_aead(ctx->aead_send);

	tls_free_both_sg(sk);

	kfree(ctx);
	kfree(tls_ctx);
}
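
/* Set up software TX crypto for a TLS socket, reached via
 * setsockopt(SOL_TLS, TLS_TX) with AES-128-GCM key material: allocate
 * the software context, store the salt|IV and record sequence number,
 * chain the AAD and data scatterlists for in-place AEAD, and key the
 * "gcm(aes)" transform.
 */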
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx)
{
	char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context *sw_ctx;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (ctx->priv_ctx) {
		rc = -EEXIST;
		goto out;
	}

	sw_ctx = kzalloc(sizeof(*sw_ctx), GFP_KERNEL);
	if (!sw_ctx) {
		rc = -ENOMEM;
		goto out;
	}

	crypto_init_wait(&sw_ctx->async_wait);

	ctx->priv_ctx = (struct tls_offload_context *)sw_ctx;

	crypto_info = &ctx->crypto_send;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	ctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	ctx->tag_size = tag_size;
	ctx->overhead_size = ctx->prepend_size + ctx->tag_size;
	ctx->iv_size = iv_size;
	ctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE, GFP_KERNEL);
	if (!ctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(ctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	ctx->rec_seq_size = rec_seq_size;
	ctx->rec_seq = kmalloc(rec_seq_size, GFP_KERNEL);
	if (!ctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}
	memcpy(ctx->rec_seq, rec_seq, rec_seq_size);

	sg_init_table(sw_ctx->sg_encrypted_data,
		      ARRAY_SIZE(sw_ctx->sg_encrypted_data));
	sg_init_table(sw_ctx->sg_plaintext_data,
		      ARRAY_SIZE(sw_ctx->sg_plaintext_data));

	sg_init_table(sw_ctx->sg_aead_in, 2);
	sg_set_buf(&sw_ctx->sg_aead_in[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_in[1]);
	sg_chain(sw_ctx->sg_aead_in, 2, sw_ctx->sg_plaintext_data);
	sg_init_table(sw_ctx->sg_aead_out, 2);
	sg_set_buf(&sw_ctx->sg_aead_out[0], sw_ctx->aad_space,
		   sizeof(sw_ctx->aad_space));
	sg_unmark_end(&sw_ctx->sg_aead_out[1]);
	sg_chain(sw_ctx->sg_aead_out, 2, sw_ctx->sg_encrypted_data);

	if (!sw_ctx->aead_send) {
		sw_ctx->aead_send = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(sw_ctx->aead_send)) {
			rc = PTR_ERR(sw_ctx->aead_send);
			sw_ctx->aead_send = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);

	rc = crypto_aead_setkey(sw_ctx->aead_send, keyval,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(sw_ctx->aead_send, ctx->tag_size);
	if (!rc)
		goto out;

free_aead:
	crypto_free_aead(sw_ctx->aead_send);
	sw_ctx->aead_send = NULL;
free_rec_seq:
	kfree(ctx->rec_seq);
	ctx->rec_seq = NULL;
free_iv:
	kfree(ctx->iv);
	ctx->iv = NULL;
free_priv:
	kfree(ctx->priv_ctx);
	ctx->priv_ctx = NULL;
out:
	return rc;
}