1 /*
2 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
4 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
5 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
6 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
7 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
9 * This software is available to you under a choice of one of two
10 * licenses. You may choose to be licensed under the terms of the GNU
11 * General Public License (GPL) Version 2, available from the file
12 * COPYING in the main directory of this source tree, or the
13 * OpenIB.org BSD license below:
15 * Redistribution and use in source and binary forms, with or
16 * without modification, are permitted provided that the following
17 * conditions are met:
19 * - Redistributions of source code must retain the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer.
23 * - Redistributions in binary form must reproduce the above
24 * copyright notice, this list of conditions and the following
25 * disclaimer in the documentation and/or other materials
26 * provided with the distribution.
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
29 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
30 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
31 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
32 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
33 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
34 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * SOFTWARE.
36 */
38 #include <linux/bug.h>
39 #include <linux/sched/signal.h>
40 #include <linux/module.h>
41 #include <linux/kernel.h>
42 #include <linux/splice.h>
43 #include <crypto/aead.h>
45 #include <net/strparser.h>
47 #include <trace/events/sock.h>
51 struct tls_decrypt_arg {
61 struct tls_decrypt_ctx {
64 u8 aad[TLS_MAX_AAD_SIZE];
66 struct scatterlist sg[];
69 noinline void tls_err_abort(struct sock *sk, int err)
71 WARN_ON_ONCE(err >= 0);
72 /* sk->sk_err should contain a positive error code. */
73 WRITE_ONCE(sk->sk_err, -err);
74 /* Paired with smp_rmb() in tcp_poll() */
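/* Count the scatterlist entries needed to map @len bytes of @skb starting at
 * @offset, walking the linear data, the page frags and the frag list (with a
 * recursion depth limit).
 */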
79 static int __skb_nsg(struct sk_buff *skb, int offset, int len,
80 unsigned int recursion_level)
82 int start = skb_headlen(skb);
83 int i, chunk = start - offset;
84 struct sk_buff *frag_iter;
87 if (unlikely(recursion_level >= 24))
100 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
103 WARN_ON(start > offset + len);
105 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
106 chunk = end - offset;
119 if (unlikely(skb_has_frag_list(skb))) {
120 skb_walk_frags(skb, frag_iter) {
123 WARN_ON(start > offset + len);
125 end = start + frag_iter->len;
126 chunk = end - offset;
130 ret = __skb_nsg(frag_iter, offset - start, chunk,
131 recursion_level + 1);
132 if (unlikely(ret < 0))
147 /* Return the number of scatterlist elements required to completely map the
148 * skb, or -EMSGSIZE if the recursion depth is exceeded.
150 static int skb_nsg(struct sk_buff *skb, int offset, int len)
152 return __skb_nsg(skb, offset, len, 0);
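/* For TLS 1.3, walk backwards over the record's zero padding to find the real
 * inner content type, store it in tlm->control and return the padding length.
 */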
155 static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
156 struct tls_decrypt_arg *darg)
158 struct strp_msg *rxm = strp_msg(skb);
159 struct tls_msg *tlm = tls_msg(skb);
162 /* Determine zero-padding length */
163 if (prot->version == TLS_1_3_VERSION) {
164 int offset = rxm->full_len - TLS_TAG_SIZE - 1;
165 char content_type = darg->zc ? darg->tail : 0;
168 while (content_type == 0) {
169 if (offset < prot->prepend_size)
171 err = skb_copy_bits(skb, rxm->offset + offset,
180 tlm->control = content_type;
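/* Completion callback for async decryption: record any error, release the
 * destination pages when the record was not decrypted in place, and wake the
 * waiter once the last pending request finishes.
 */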
185 static void tls_decrypt_done(void *data, int err)
187 struct aead_request *aead_req = data;
188 struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
189 struct scatterlist *sgout = aead_req->dst;
190 struct scatterlist *sgin = aead_req->src;
191 struct tls_sw_context_rx *ctx;
192 struct tls_decrypt_ctx *dctx;
193 struct tls_context *tls_ctx;
194 struct scatterlist *sg;
199 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
200 aead_size = ALIGN(aead_size, __alignof__(*dctx));
201 dctx = (void *)((u8 *)aead_req + aead_size);
204 tls_ctx = tls_get_ctx(sk);
205 ctx = tls_sw_ctx_rx(tls_ctx);
207 /* Propagate if there was an err */
210 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
211 ctx->async_wait.err = err;
212 tls_err_abort(sk, err);
215 /* Free the destination pages if skb was not decrypted in place */
217 /* Skip the first S/G entry as it points to AAD */
218 for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
221 put_page(sg_page(sg));
227 spin_lock_bh(&ctx->decrypt_compl_lock);
228 if (!atomic_dec_return(&ctx->decrypt_pending))
229 complete(&ctx->async_wait.completion);
230 spin_unlock_bh(&ctx->decrypt_compl_lock);
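/* Set up the AEAD decrypt request for the record and submit it; runs
 * synchronously via crypto_wait_req() or, when darg allows async, completes
 * later in tls_decrypt_done().
 */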
233 static int tls_do_decryption(struct sock *sk,
234 struct scatterlist *sgin,
235 struct scatterlist *sgout,
238 struct aead_request *aead_req,
239 struct tls_decrypt_arg *darg)
241 struct tls_context *tls_ctx = tls_get_ctx(sk);
242 struct tls_prot_info *prot = &tls_ctx->prot_info;
243 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
246 aead_request_set_tfm(aead_req, ctx->aead_recv);
247 aead_request_set_ad(aead_req, prot->aad_size);
248 aead_request_set_crypt(aead_req, sgin, sgout,
249 data_len + prot->tag_size,
253 aead_request_set_callback(aead_req,
254 CRYPTO_TFM_REQ_MAY_BACKLOG,
255 tls_decrypt_done, aead_req);
256 atomic_inc(&ctx->decrypt_pending);
258 aead_request_set_callback(aead_req,
259 CRYPTO_TFM_REQ_MAY_BACKLOG,
260 crypto_req_done, &ctx->async_wait);
263 ret = crypto_aead_decrypt(aead_req);
264 if (ret == -EINPROGRESS) {
268 ret = crypto_wait_req(ret, &ctx->async_wait);
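/* Trim the open record's plaintext and encrypted messages down to
 * target_size (the encrypted side keeps room for the protocol overhead).
 */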
275 static void tls_trim_both_msgs(struct sock *sk, int target_size)
277 struct tls_context *tls_ctx = tls_get_ctx(sk);
278 struct tls_prot_info *prot = &tls_ctx->prot_info;
279 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
280 struct tls_rec *rec = ctx->open_rec;
282 sk_msg_trim(sk, &rec->msg_plaintext, target_size);
284 target_size += prot->overhead_size;
285 sk_msg_trim(sk, &rec->msg_encrypted, target_size);
288 static int tls_alloc_encrypted_msg(struct sock *sk, int len)
290 struct tls_context *tls_ctx = tls_get_ctx(sk);
291 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
292 struct tls_rec *rec = ctx->open_rec;
293 struct sk_msg *msg_en = &rec->msg_encrypted;
295 return sk_msg_alloc(sk, msg_en, len, 0);
298 static int tls_clone_plaintext_msg(struct sock *sk, int required)
300 struct tls_context *tls_ctx = tls_get_ctx(sk);
301 struct tls_prot_info *prot = &tls_ctx->prot_info;
302 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
303 struct tls_rec *rec = ctx->open_rec;
304 struct sk_msg *msg_pl = &rec->msg_plaintext;
305 struct sk_msg *msg_en = &rec->msg_encrypted;
308 /* We add page references worth len bytes from the encrypted sg
309 * at the end of the plaintext sg. It is guaranteed that msg_en
310 * has enough room (ensured by the caller).
312 len = required - msg_pl->sg.size;
314 /* Skip initial bytes in msg_en's data to be able to use
315 * same offset of both plain and encrypted data.
317 skip = prot->prepend_size + msg_pl->sg.size;
319 return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
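/* Allocate a new TLS record and set up its AAD-prefixed scatterlists used as
 * AEAD input (plaintext) and output (ciphertext).
 */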
322 static struct tls_rec *tls_get_rec(struct sock *sk)
324 struct tls_context *tls_ctx = tls_get_ctx(sk);
325 struct tls_prot_info *prot = &tls_ctx->prot_info;
326 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
327 struct sk_msg *msg_pl, *msg_en;
331 mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);
333 rec = kzalloc(mem_size, sk->sk_allocation);
337 msg_pl = &rec->msg_plaintext;
338 msg_en = &rec->msg_encrypted;
343 sg_init_table(rec->sg_aead_in, 2);
344 sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
345 sg_unmark_end(&rec->sg_aead_in[1]);
347 sg_init_table(rec->sg_aead_out, 2);
348 sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
349 sg_unmark_end(&rec->sg_aead_out[1]);
356 static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
358 sk_msg_free(sk, &rec->msg_encrypted);
359 sk_msg_free(sk, &rec->msg_plaintext);
363 static void tls_free_open_rec(struct sock *sk)
365 struct tls_context *tls_ctx = tls_get_ctx(sk);
366 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
367 struct tls_rec *rec = ctx->open_rec;
370 tls_free_rec(sk, rec);
371 ctx->open_rec = NULL;
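/* Transmit encrypted records from tx_list: finish a partially sent record
 * first, then push every record whose encryption has completed (tx_ready),
 * freeing records as they go out.
 */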
375 int tls_tx_records(struct sock *sk, int flags)
377 struct tls_context *tls_ctx = tls_get_ctx(sk);
378 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
379 struct tls_rec *rec, *tmp;
380 struct sk_msg *msg_en;
381 int tx_flags, rc = 0;
383 if (tls_is_partially_sent_record(tls_ctx)) {
384 rec = list_first_entry(&ctx->tx_list,
385 struct tls_rec, list);
388 tx_flags = rec->tx_flags;
392 rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
396 /* Full record has been transmitted.
397 * Remove the head of tx_list
399 list_del(&rec->list);
400 sk_msg_free(sk, &rec->msg_plaintext);
404 /* Tx all ready records */
405 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
406 if (READ_ONCE(rec->tx_ready)) {
408 tx_flags = rec->tx_flags;
412 msg_en = &rec->msg_encrypted;
413 rc = tls_push_sg(sk, tls_ctx,
414 &msg_en->sg.data[msg_en->sg.curr],
419 list_del(&rec->list);
420 sk_msg_free(sk, &rec->msg_plaintext);
428 if (rc < 0 && rc != -EAGAIN)
429 tls_err_abort(sk, -EBADMSG);
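/* Async encryption completion: undo the header offset adjustment on the
 * ciphertext sg, record any error, mark the record tx_ready and schedule the
 * tx work if it sits at the head of tx_list.
 */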
434 static void tls_encrypt_done(void *data, int err)
436 struct tls_sw_context_tx *ctx;
437 struct tls_context *tls_ctx;
438 struct tls_prot_info *prot;
439 struct tls_rec *rec = data;
440 struct scatterlist *sge;
441 struct sk_msg *msg_en;
446 msg_en = &rec->msg_encrypted;
449 tls_ctx = tls_get_ctx(sk);
450 prot = &tls_ctx->prot_info;
451 ctx = tls_sw_ctx_tx(tls_ctx);
453 sge = sk_msg_elem(msg_en, msg_en->sg.curr);
454 sge->offset -= prot->prepend_size;
455 sge->length += prot->prepend_size;
457 /* Check if an error was previously set on the socket */
458 if (err || sk->sk_err) {
461 /* If err is already set on socket, return the same code */
463 ctx->async_wait.err = -sk->sk_err;
465 ctx->async_wait.err = err;
466 tls_err_abort(sk, err);
471 struct tls_rec *first_rec;
473 /* Mark the record as ready for transmission */
474 smp_store_mb(rec->tx_ready, true);
476 /* If received record is at head of tx_list, schedule tx */
477 first_rec = list_first_entry(&ctx->tx_list,
478 struct tls_rec, list);
479 if (rec == first_rec)
483 spin_lock_bh(&ctx->encrypt_compl_lock);
484 pending = atomic_dec_return(&ctx->encrypt_pending);
486 if (!pending && ctx->async_notify)
487 complete(&ctx->async_wait.completion);
488 spin_unlock_bh(&ctx->encrypt_compl_lock);
493 /* Schedule the transmission */
494 if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
495 schedule_delayed_work(&ctx->tx_work.work, 1);
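/* Build the per-record IV/nonce, set up the AEAD encrypt request and submit
 * it; the record is queued on tx_list and, unless encryption failed, the TX
 * record sequence number is advanced.
 */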
498 static int tls_do_encryption(struct sock *sk,
499 struct tls_context *tls_ctx,
500 struct tls_sw_context_tx *ctx,
501 struct aead_request *aead_req,
502 size_t data_len, u32 start)
504 struct tls_prot_info *prot = &tls_ctx->prot_info;
505 struct tls_rec *rec = ctx->open_rec;
506 struct sk_msg *msg_en = &rec->msg_encrypted;
507 struct scatterlist *sge = sk_msg_elem(msg_en, start);
508 int rc, iv_offset = 0;
510 /* For CCM based ciphers, first byte of IV is a constant */
511 switch (prot->cipher_type) {
512 case TLS_CIPHER_AES_CCM_128:
513 rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
516 case TLS_CIPHER_SM4_CCM:
517 rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
522 memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
523 prot->iv_size + prot->salt_size);
525 tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
526 tls_ctx->tx.rec_seq);
528 sge->offset += prot->prepend_size;
529 sge->length -= prot->prepend_size;
531 msg_en->sg.curr = start;
533 aead_request_set_tfm(aead_req, ctx->aead_send);
534 aead_request_set_ad(aead_req, prot->aad_size);
535 aead_request_set_crypt(aead_req, rec->sg_aead_in,
537 data_len, rec->iv_data);
539 aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
540 tls_encrypt_done, rec);
542 /* Add the record in tx_list */
543 list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
544 atomic_inc(&ctx->encrypt_pending);
546 rc = crypto_aead_encrypt(aead_req);
547 if (!rc || rc != -EINPROGRESS) {
548 atomic_dec(&ctx->encrypt_pending);
549 sge->offset -= prot->prepend_size;
550 sge->length += prot->prepend_size;
554 WRITE_ONCE(rec->tx_ready, true);
555 } else if (rc != -EINPROGRESS) {
556 list_del(&rec->list);
560 /* Unhook the record from the context if encryption did not fail */
561 ctx->open_rec = NULL;
562 tls_advance_record_sn(sk, prot, &tls_ctx->tx);
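/* Split the open record at split_point: the first apply_bytes worth of
 * plaintext stays in the original record, the remainder is moved to a freshly
 * allocated record returned in *to.
 */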
566 static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
567 struct tls_rec **to, struct sk_msg *msg_opl,
568 struct sk_msg *msg_oen, u32 split_point,
569 u32 tx_overhead_size, u32 *orig_end)
571 u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
572 struct scatterlist *sge, *osge, *nsge;
573 u32 orig_size = msg_opl->sg.size;
574 struct scatterlist tmp = { };
575 struct sk_msg *msg_npl;
579 new = tls_get_rec(sk);
582 ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
583 tx_overhead_size, 0);
585 tls_free_rec(sk, new);
589 *orig_end = msg_opl->sg.end;
590 i = msg_opl->sg.start;
591 sge = sk_msg_elem(msg_opl, i);
592 while (apply && sge->length) {
593 if (sge->length > apply) {
594 u32 len = sge->length - apply;
596 get_page(sg_page(sge));
597 sg_set_page(&tmp, sg_page(sge), len,
598 sge->offset + apply);
603 apply -= sge->length;
604 bytes += sge->length;
607 sk_msg_iter_var_next(i);
608 if (i == msg_opl->sg.end)
610 sge = sk_msg_elem(msg_opl, i);
614 msg_opl->sg.curr = i;
615 msg_opl->sg.copybreak = 0;
616 msg_opl->apply_bytes = 0;
617 msg_opl->sg.size = bytes;
619 msg_npl = &new->msg_plaintext;
620 msg_npl->apply_bytes = apply;
621 msg_npl->sg.size = orig_size - bytes;
623 j = msg_npl->sg.start;
624 nsge = sk_msg_elem(msg_npl, j);
626 memcpy(nsge, &tmp, sizeof(*nsge));
627 sk_msg_iter_var_next(j);
628 nsge = sk_msg_elem(msg_npl, j);
631 osge = sk_msg_elem(msg_opl, i);
632 while (osge->length) {
633 memcpy(nsge, osge, sizeof(*nsge));
635 sk_msg_iter_var_next(i);
636 sk_msg_iter_var_next(j);
639 osge = sk_msg_elem(msg_opl, i);
640 nsge = sk_msg_elem(msg_npl, j);
644 msg_npl->sg.curr = j;
645 msg_npl->sg.copybreak = 0;
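/* Undo a record split: fold the plaintext of @from back into @to (coalescing
 * the boundary sg entries when possible) and take over its encrypted buffer.
 */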
651 static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
652 struct tls_rec *from, u32 orig_end)
654 struct sk_msg *msg_npl = &from->msg_plaintext;
655 struct sk_msg *msg_opl = &to->msg_plaintext;
656 struct scatterlist *osge, *nsge;
660 sk_msg_iter_var_prev(i);
661 j = msg_npl->sg.start;
663 osge = sk_msg_elem(msg_opl, i);
664 nsge = sk_msg_elem(msg_npl, j);
666 if (sg_page(osge) == sg_page(nsge) &&
667 osge->offset + osge->length == nsge->offset) {
668 osge->length += nsge->length;
669 put_page(sg_page(nsge));
672 msg_opl->sg.end = orig_end;
673 msg_opl->sg.curr = orig_end;
674 msg_opl->sg.copybreak = 0;
675 msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
676 msg_opl->sg.size += msg_npl->sg.size;
678 sk_msg_free(sk, &to->msg_encrypted);
679 sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);
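/* Close and send the open record: split it if only part of it should be
 * pushed, chain in the TLS 1.3 content type, build the AAD and record header,
 * then encrypt and transmit.
 */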
684 static int tls_push_record(struct sock *sk, int flags,
685 unsigned char record_type)
687 struct tls_context *tls_ctx = tls_get_ctx(sk);
688 struct tls_prot_info *prot = &tls_ctx->prot_info;
689 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
690 struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
691 u32 i, split_point, orig_end;
692 struct sk_msg *msg_pl, *msg_en;
693 struct aead_request *req;
700 msg_pl = &rec->msg_plaintext;
701 msg_en = &rec->msg_encrypted;
703 split_point = msg_pl->apply_bytes;
704 split = split_point && split_point < msg_pl->sg.size;
705 if (unlikely((!split &&
707 prot->overhead_size > msg_en->sg.size) ||
710 prot->overhead_size > msg_en->sg.size))) {
712 split_point = msg_en->sg.size;
715 rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
716 split_point, prot->overhead_size,
720 /* This can happen if tls_split_open_record() above allocates
721 * a single large encryption buffer instead of two smaller
722 * ones. In this case adjust pointers and continue without
723 * split.
725 if (!msg_pl->sg.size) {
726 tls_merge_open_record(sk, rec, tmp, orig_end);
727 msg_pl = &rec->msg_plaintext;
728 msg_en = &rec->msg_encrypted;
731 sk_msg_trim(sk, msg_en, msg_pl->sg.size +
732 prot->overhead_size);
735 rec->tx_flags = flags;
736 req = &rec->aead_req;
739 sk_msg_iter_var_prev(i);
741 rec->content_type = record_type;
742 if (prot->version == TLS_1_3_VERSION) {
743 /* Add content type to end of message. No padding added */
744 sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
745 sg_mark_end(&rec->sg_content_type);
746 sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
747 &rec->sg_content_type);
749 sg_mark_end(sk_msg_elem(msg_pl, i));
752 if (msg_pl->sg.end < msg_pl->sg.start) {
753 sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
754 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
758 i = msg_pl->sg.start;
759 sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
762 sk_msg_iter_var_prev(i);
763 sg_mark_end(sk_msg_elem(msg_en, i));
765 i = msg_en->sg.start;
766 sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
768 tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
769 tls_ctx->tx.rec_seq, record_type, prot);
771 tls_fill_prepend(tls_ctx,
772 page_address(sg_page(&msg_en->sg.data[i])) +
773 msg_en->sg.data[i].offset,
774 msg_pl->sg.size + prot->tail_size,
777 tls_ctx->pending_open_record_frags = false;
779 rc = tls_do_encryption(sk, tls_ctx, ctx, req,
780 msg_pl->sg.size + prot->tail_size, i);
782 if (rc != -EINPROGRESS) {
783 tls_err_abort(sk, -EBADMSG);
785 tls_ctx->pending_open_record_frags = true;
786 tls_merge_open_record(sk, rec, tmp, orig_end);
789 ctx->async_capable = 1;
792 msg_pl = &tmp->msg_plaintext;
793 msg_en = &tmp->msg_encrypted;
794 sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
795 tls_ctx->pending_open_record_frags = true;
799 return tls_tx_records(sk, flags);
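/* Run the psock's BPF verdict program (if any) over the plaintext message and
 * act on the result: __SK_PASS pushes the record, __SK_REDIRECT sends it to
 * another socket, and dropped messages are freed and uncounted.
 */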
802 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
803 bool full_record, u8 record_type,
804 ssize_t *copied, int flags)
806 struct tls_context *tls_ctx = tls_get_ctx(sk);
807 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
808 struct sk_msg msg_redir = { };
809 struct sk_psock *psock;
810 struct sock *sk_redir;
812 bool enospc, policy, redir_ingress;
816 policy = !(flags & MSG_SENDPAGE_NOPOLICY);
817 psock = sk_psock_get(sk);
818 if (!psock || !policy) {
819 err = tls_push_record(sk, flags, record_type);
820 if (err && sk->sk_err == EBADMSG) {
821 *copied -= sk_msg_free(sk, msg);
822 tls_free_open_rec(sk);
826 sk_psock_put(sk, psock);
830 enospc = sk_msg_full(msg);
831 if (psock->eval == __SK_NONE) {
832 delta = msg->sg.size;
833 psock->eval = sk_psock_msg_verdict(sk, psock, msg);
834 delta -= msg->sg.size;
836 if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
837 !enospc && !full_record) {
843 if (msg->apply_bytes && msg->apply_bytes < send)
844 send = msg->apply_bytes;
846 switch (psock->eval) {
848 err = tls_push_record(sk, flags, record_type);
849 if (err && sk->sk_err == EBADMSG) {
850 *copied -= sk_msg_free(sk, msg);
851 tls_free_open_rec(sk);
857 redir_ingress = psock->redir_ingress;
858 sk_redir = psock->sk_redir;
859 memcpy(&msg_redir, msg, sizeof(*msg));
860 if (msg->apply_bytes < send)
861 msg->apply_bytes = 0;
863 msg->apply_bytes -= send;
864 sk_msg_return_zero(sk, msg, send);
865 msg->sg.size -= send;
867 err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
868 &msg_redir, send, flags);
871 *copied -= sk_msg_free_nocharge(sk, &msg_redir);
874 if (msg->sg.size == 0)
875 tls_free_open_rec(sk);
879 sk_msg_free_partial(sk, msg, send);
880 if (msg->apply_bytes < send)
881 msg->apply_bytes = 0;
883 msg->apply_bytes -= send;
884 if (msg->sg.size == 0)
885 tls_free_open_rec(sk);
886 *copied -= (send + delta);
891 bool reset_eval = !ctx->open_rec;
895 msg = &rec->msg_plaintext;
896 if (!msg->apply_bytes)
900 psock->eval = __SK_NONE;
901 if (psock->sk_redir) {
902 sock_put(psock->sk_redir);
903 psock->sk_redir = NULL;
910 sk_psock_put(sk, psock);
914 static int tls_sw_push_pending_record(struct sock *sk, int flags)
916 struct tls_context *tls_ctx = tls_get_ctx(sk);
917 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
918 struct tls_rec *rec = ctx->open_rec;
919 struct sk_msg *msg_pl;
925 msg_pl = &rec->msg_plaintext;
926 copied = msg_pl->sg.size;
930 return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
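/* sendmsg() for a TLS_SW transmit socket: build records from the user's iov
 * (zero-copy from the iterator when possible), run the BPF verdict on full
 * records and push them through encryption.
 */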
934 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
936 long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
937 struct tls_context *tls_ctx = tls_get_ctx(sk);
938 struct tls_prot_info *prot = &tls_ctx->prot_info;
939 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
940 bool async_capable = ctx->async_capable;
941 unsigned char record_type = TLS_RECORD_TYPE_DATA;
942 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
943 bool eor = !(msg->msg_flags & MSG_MORE);
946 struct sk_msg *msg_pl, *msg_en;
957 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
958 MSG_CMSG_COMPAT | MSG_SPLICE_PAGES))
961 ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
966 if (unlikely(msg->msg_controllen)) {
967 ret = tls_process_cmsg(sk, msg, &record_type);
969 if (ret == -EINPROGRESS)
971 else if (ret != -EAGAIN)
976 while (msg_data_left(msg)) {
985 rec = ctx->open_rec = tls_get_rec(sk);
991 msg_pl = &rec->msg_plaintext;
992 msg_en = &rec->msg_encrypted;
994 orig_size = msg_pl->sg.size;
996 try_to_copy = msg_data_left(msg);
997 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
998 if (try_to_copy >= record_room) {
999 try_to_copy = record_room;
1003 required_size = msg_pl->sg.size + try_to_copy +
1004 prot->overhead_size;
1006 if (!sk_stream_memory_free(sk))
1007 goto wait_for_sndbuf;
1010 ret = tls_alloc_encrypted_msg(sk, required_size);
1013 goto wait_for_memory;
1015 /* Adjust try_to_copy according to the amount that was
1016 * actually allocated. The difference is due to the
1017 * max sg elements limit.
1019 try_to_copy -= required_size - msg_en->sg.size;
1023 if (!is_kvec && (full_record || eor) && !async_capable) {
1024 u32 first = msg_pl->sg.end;
1026 ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
1027 msg_pl, try_to_copy);
1029 goto fallback_to_reg_send;
1032 copied += try_to_copy;
1034 sk_msg_sg_copy_set(msg_pl, first);
1035 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1036 record_type, &copied,
1039 if (ret == -EINPROGRESS)
1041 else if (ret == -ENOMEM)
1042 goto wait_for_memory;
1043 else if (ctx->open_rec && ret == -ENOSPC)
1045 else if (ret != -EAGAIN)
1050 copied -= try_to_copy;
1051 sk_msg_sg_copy_clear(msg_pl, first);
1052 iov_iter_revert(&msg->msg_iter,
1053 msg_pl->sg.size - orig_size);
1054 fallback_to_reg_send:
1055 sk_msg_trim(sk, msg_pl, orig_size);
1058 required_size = msg_pl->sg.size + try_to_copy;
1060 ret = tls_clone_plaintext_msg(sk, required_size);
1065 /* Adjust try_to_copy according to the amount that was
1066 * actually allocated. The difference is due to the
1067 * max sg elements limit.
1069 try_to_copy -= required_size - msg_pl->sg.size;
1071 sk_msg_trim(sk, msg_en,
1072 msg_pl->sg.size + prot->overhead_size);
1076 ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
1077 msg_pl, try_to_copy);
1082 /* The open record is defined only if the copy succeeded; otherwise
1083 * we would trim the sg but not reset the open record frags.
1085 tls_ctx->pending_open_record_frags = true;
1086 copied += try_to_copy;
1087 if (full_record || eor) {
1088 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1089 record_type, &copied,
1092 if (ret == -EINPROGRESS)
1094 else if (ret == -ENOMEM)
1095 goto wait_for_memory;
1096 else if (ret != -EAGAIN) {
1107 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1109 ret = sk_stream_wait_memory(sk, &timeo);
1113 tls_trim_both_msgs(sk, orig_size);
1117 if (ctx->open_rec && msg_en->sg.size < required_size)
1118 goto alloc_encrypted;
1123 } else if (num_zc) {
1124 /* Wait for pending encryptions to get completed */
1125 spin_lock_bh(&ctx->encrypt_compl_lock);
1126 ctx->async_notify = true;
1128 pending = atomic_read(&ctx->encrypt_pending);
1129 spin_unlock_bh(&ctx->encrypt_compl_lock);
1131 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1133 reinit_completion(&ctx->async_wait.completion);
1135 /* There can be no concurrent accesses, since we have no
1136 * pending encrypt operations
1138 WRITE_ONCE(ctx->async_notify, false);
1140 if (ctx->async_wait.err) {
1141 ret = ctx->async_wait.err;
1146 /* Transmit if any encryptions have completed */
1147 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1148 cancel_delayed_work(&ctx->tx_work.work);
1149 tls_tx_records(sk, msg->msg_flags);
1153 ret = sk_stream_error(sk, msg->msg_flags, ret);
1156 mutex_unlock(&tls_ctx->tx_lock);
1157 return copied > 0 ? copied : ret;
1161 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
1163 void tls_sw_splice_eof(struct socket *sock)
1165 struct sock *sk = sock->sk;
1166 struct tls_context *tls_ctx = tls_get_ctx(sk);
1167 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1168 struct tls_rec *rec;
1169 struct sk_msg *msg_pl;
1171 bool retrying = false;
1178 mutex_lock(&tls_ctx->tx_lock);
1182 rec = ctx->open_rec;
1186 msg_pl = &rec->msg_plaintext;
1188 /* Check the BPF advisor and perform transmission. */
1189 ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
1204 /* Wait for pending encryptions to get completed */
1205 spin_lock_bh(&ctx->encrypt_compl_lock);
1206 ctx->async_notify = true;
1208 pending = atomic_read(&ctx->encrypt_pending);
1209 spin_unlock_bh(&ctx->encrypt_compl_lock);
1211 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
1213 reinit_completion(&ctx->async_wait.completion);
1215 /* There can be no concurrent accesses, since we have no pending
1216 * encrypt operations
1218 WRITE_ONCE(ctx->async_notify, false);
1220 if (ctx->async_wait.err)
1223 /* Transmit if any encryptions have completed */
1224 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1225 cancel_delayed_work(&ctx->tx_work.work);
1226 tls_tx_records(sk, 0);
1231 mutex_unlock(&tls_ctx->tx_lock);
1234 static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1235 int offset, size_t size, int flags)
1237 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1238 struct tls_context *tls_ctx = tls_get_ctx(sk);
1239 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
1240 struct tls_prot_info *prot = &tls_ctx->prot_info;
1241 unsigned char record_type = TLS_RECORD_TYPE_DATA;
1242 struct sk_msg *msg_pl;
1243 struct tls_rec *rec;
1251 eor = !(flags & MSG_SENDPAGE_NOTLAST);
1252 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1254 /* Call the sk_stream functions to manage the sndbuf mem. */
1256 size_t copy, required_size;
1264 rec = ctx->open_rec;
1266 rec = ctx->open_rec = tls_get_rec(sk);
1272 msg_pl = &rec->msg_plaintext;
1274 full_record = false;
1275 record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
1277 if (copy >= record_room) {
1282 required_size = msg_pl->sg.size + copy + prot->overhead_size;
1284 if (!sk_stream_memory_free(sk))
1285 goto wait_for_sndbuf;
1287 ret = tls_alloc_encrypted_msg(sk, required_size);
1290 goto wait_for_memory;
1292 /* Adjust copy according to the amount that was
1293 * actually allocated. The difference is due to the
1294 * max sg elements limit.
1296 copy -= required_size - msg_pl->sg.size;
1300 sk_msg_page_add(msg_pl, page, copy, offset);
1301 sk_mem_charge(sk, copy);
1307 tls_ctx->pending_open_record_frags = true;
1308 if (full_record || eor || sk_msg_full(msg_pl)) {
1309 ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
1310 record_type, &copied, flags);
1312 if (ret == -EINPROGRESS)
1314 else if (ret == -ENOMEM)
1315 goto wait_for_memory;
1316 else if (ret != -EAGAIN) {
1325 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1327 ret = sk_stream_wait_memory(sk, &timeo);
1330 tls_trim_both_msgs(sk, msg_pl->sg.size);
1339 /* Transmit if any encryptions have completed */
1340 if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
1341 cancel_delayed_work(&ctx->tx_work.work);
1342 tls_tx_records(sk, flags);
1346 ret = sk_stream_error(sk, flags, ret);
1347 return copied > 0 ? copied : ret;
1350 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
1351 int offset, size_t size, int flags)
1353 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1354 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
1355 MSG_NO_SHARED_FRAGS))
1358 return tls_sw_do_sendpage(sk, page, offset, size, flags);
1361 int tls_sw_sendpage(struct sock *sk, struct page *page,
1362 int offset, size_t size, int flags)
1364 struct tls_context *tls_ctx = tls_get_ctx(sk);
1367 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1368 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1371 ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
1375 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1377 mutex_unlock(&tls_ctx->tx_lock);
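/* Wait for the strparser to have a full record ready (or for the psock
 * ingress queue to be non-empty), respecting the receive timeout, socket
 * shutdown and pending signals.
 */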
1382 tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
1385 struct tls_context *tls_ctx = tls_get_ctx(sk);
1386 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1387 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1390 timeo = sock_rcvtimeo(sk, nonblock);
1392 while (!tls_strp_msg_ready(ctx)) {
1393 if (!sk_psock_queue_empty(psock))
1397 return sock_error(sk);
1399 if (!skb_queue_empty(&sk->sk_receive_queue)) {
1400 tls_strp_check_rcv(&ctx->strp);
1401 if (tls_strp_msg_ready(ctx))
1405 if (sk->sk_shutdown & RCV_SHUTDOWN)
1408 if (sock_flag(sk, SOCK_DONE))
1415 add_wait_queue(sk_sleep(sk), &wait);
1416 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1417 sk_wait_event(sk, &timeo,
1418 tls_strp_msg_ready(ctx) ||
1419 !sk_psock_queue_empty(psock),
1421 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
1422 remove_wait_queue(sk_sleep(sk), &wait);
1424 /* Handle signals */
1425 if (signal_pending(current))
1426 return sock_intr_errno(timeo);
1429 tls_strp_msg_load(&ctx->strp, released);
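/* Pin user pages from the iov iterator and map up to @length bytes into the
 * scatterlist @to, so the record can be decrypted directly into user memory.
 */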
1434 static int tls_setup_from_iter(struct iov_iter *from,
1435 int length, int *pages_used,
1436 struct scatterlist *to,
1439 int rc = 0, i = 0, num_elem = *pages_used, maxpages;
1440 struct page *pages[MAX_SKB_FRAGS];
1441 unsigned int size = 0;
1442 ssize_t copied, use;
1445 while (length > 0) {
1447 maxpages = to_max_pages - num_elem;
1448 if (maxpages == 0) {
1452 copied = iov_iter_get_pages2(from, pages,
1463 use = min_t(int, copied, PAGE_SIZE - offset);
1465 sg_set_page(&to[num_elem],
1466 pages[i], use, offset);
1467 sg_unmark_end(&to[num_elem]);
1468 /* We do not uncharge memory from this API */
1477 /* Mark the end in the last sg entry if newly added */
1478 if (num_elem > *pages_used)
1479 sg_mark_end(&to[num_elem - 1]);
1482 iov_iter_revert(from, size);
1483 *pages_used = num_elem;
1488 static struct sk_buff *
1489 tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
1490 unsigned int full_len)
1492 struct strp_msg *clr_rxm;
1493 struct sk_buff *clr_skb;
1496 clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
1497 &err, sk->sk_allocation);
1501 skb_copy_header(clr_skb, skb);
1502 clr_skb->len = full_len;
1503 clr_skb->data_len = full_len;
1505 clr_rxm = strp_msg(clr_skb);
1506 clr_rxm->offset = 0;
1513 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
1514 * They must transform the darg in/out argument as follows:
1515 *       |          Input            |         Output
1516 * -------------------------------------------------------------------
1517 *    zc | Zero-copy decrypt allowed | Zero-copy performed
1518 * async | Async decrypt allowed     | Async crypto used / in progress
1519 *   skb |            *              | Output skb
1521 * If ZC decryption was performed darg.skb will point to the input skb.
1524 /* This function decrypts the input skb into either out_iov, out_sg,
1525 * or the skb's own buffers. The input parameter 'darg->zc' indicates
1526 * whether zero-copy mode should be tried. With zero-copy mode, either
1527 * out_iov or out_sg must be non-NULL. If both out_iov and out_sg are
1528 * NULL, the decryption happens inside the skb buffers themselves, i.e.
1529 * zero-copy gets disabled and 'darg->zc' is updated.
1531 static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
1532 struct scatterlist *out_sg,
1533 struct tls_decrypt_arg *darg)
1535 struct tls_context *tls_ctx = tls_get_ctx(sk);
1536 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1537 struct tls_prot_info *prot = &tls_ctx->prot_info;
1538 int n_sgin, n_sgout, aead_size, err, pages = 0;
1539 struct sk_buff *skb = tls_strp_msg(ctx);
1540 const struct strp_msg *rxm = strp_msg(skb);
1541 const struct tls_msg *tlm = tls_msg(skb);
1542 struct aead_request *aead_req;
1543 struct scatterlist *sgin = NULL;
1544 struct scatterlist *sgout = NULL;
1545 const int data_len = rxm->full_len - prot->overhead_size;
1546 int tail_pages = !!prot->tail_size;
1547 struct tls_decrypt_ctx *dctx;
1548 struct sk_buff *clear_skb;
1552 n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
1553 rxm->full_len - prot->prepend_size);
1555 return n_sgin ?: -EBADMSG;
1557 if (darg->zc && (out_iov || out_sg)) {
1561 n_sgout = 1 + tail_pages +
1562 iov_iter_npages_cap(out_iov, INT_MAX, data_len);
1564 n_sgout = sg_nents(out_sg);
1568 clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
1572 n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
1575 /* Increment to accommodate AAD */
1576 n_sgin = n_sgin + 1;
1578 /* Allocate a single block of memory which contains
1579 * aead_req || tls_decrypt_ctx.
1580 * Both structs are variable length.
1582 aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
1583 aead_size = ALIGN(aead_size, __alignof__(*dctx));
1584 mem = kmalloc(aead_size + struct_size(dctx, sg, n_sgin + n_sgout),
1591 /* Segment the allocated memory */
1592 aead_req = (struct aead_request *)mem;
1593 dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
1595 sgin = &dctx->sg[0];
1596 sgout = &dctx->sg[n_sgin];
1598 /* For CCM based ciphers, first byte of nonce+iv is a constant */
1599 switch (prot->cipher_type) {
1600 case TLS_CIPHER_AES_CCM_128:
1601 dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
1604 case TLS_CIPHER_SM4_CCM:
1605 dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
1611 if (prot->version == TLS_1_3_VERSION ||
1612 prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
1613 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
1614 prot->iv_size + prot->salt_size);
1616 err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
1617 &dctx->iv[iv_offset] + prot->salt_size,
1621 memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
1623 tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);
1626 tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
1628 tls_ctx->rx.rec_seq, tlm->control, prot);
1631 sg_init_table(sgin, n_sgin);
1632 sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
1633 err = skb_to_sgvec(skb, &sgin[1],
1634 rxm->offset + prot->prepend_size,
1635 rxm->full_len - prot->prepend_size);
1640 sg_init_table(sgout, n_sgout);
1641 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1643 err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
1644 data_len + prot->tail_size);
1647 } else if (out_iov) {
1648 sg_init_table(sgout, n_sgout);
1649 sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);
1651 err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
1652 (n_sgout - 1 - tail_pages));
1654 goto exit_free_pages;
1656 if (prot->tail_size) {
1657 sg_unmark_end(&sgout[pages]);
1658 sg_set_buf(&sgout[pages + 1], &dctx->tail,
1660 sg_mark_end(&sgout[pages + 1]);
1662 } else if (out_sg) {
1663 memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
1666 /* Prepare and submit AEAD request */
1667 err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
1668 data_len + prot->tail_size, aead_req, darg);
1670 goto exit_free_pages;
1672 darg->skb = clear_skb ?: tls_strp_msg(ctx);
1675 if (unlikely(darg->async)) {
1676 err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
1678 __skb_queue_tail(&ctx->async_hold, darg->skb);
1682 if (prot->tail_size)
1683 darg->tail = dctx->tail;
1686 /* Release the pages in case iov was mapped to pages */
1687 for (; pages > 0; pages--)
1688 put_page(sg_page(&sgout[pages]));
1692 consume_skb(clear_skb);
1697 tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
1698 struct msghdr *msg, struct tls_decrypt_arg *darg)
1700 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1701 struct tls_prot_info *prot = &tls_ctx->prot_info;
1702 struct strp_msg *rxm;
1705 err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
1707 if (err == -EBADMSG)
1708 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
1711 /* keep going even for ->async, the code below is TLS 1.3 */
1713 /* If opportunistic TLS 1.3 ZC failed retry without ZC */
1714 if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
1715 darg->tail != TLS_RECORD_TYPE_DATA)) {
1718 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
1719 TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
1720 return tls_decrypt_sw(sk, tls_ctx, msg, darg);
1723 pad = tls_padding_length(prot, darg->skb, darg);
1725 if (darg->skb != tls_strp_msg(ctx))
1726 consume_skb(darg->skb);
1730 rxm = strp_msg(darg->skb);
1731 rxm->full_len -= pad;
1737 tls_decrypt_device(struct sock *sk, struct msghdr *msg,
1738 struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
1740 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
1741 struct tls_prot_info *prot = &tls_ctx->prot_info;
1742 struct strp_msg *rxm;
1745 if (tls_ctx->rx_conf != TLS_HW)
1748 err = tls_device_decrypted(sk, tls_ctx);
1752 pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
1756 darg->async = false;
1757 darg->skb = tls_strp_msg(ctx);
1758 /* ->zc downgrade check, in case TLS 1.3 gets here */
1759 darg->zc &= !(prot->version == TLS_1_3_VERSION &&
1760 tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);
1762 rxm = strp_msg(darg->skb);
1763 rxm->full_len -= pad;
1766 /* Non-ZC case needs a real skb */
1767 darg->skb = tls_strp_msg_detach(ctx);
1771 unsigned int off, len;
1773 /* In ZC case nobody cares about the output skb.
1774 * Just copy the data here. Note the skb is not fully trimmed.
1776 off = rxm->offset + prot->prepend_size;
1777 len = rxm->full_len - prot->overhead_size;
1779 err = skb_copy_datagram_msg(darg->skb, off, msg, len);
1786 static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
1787 struct tls_decrypt_arg *darg)
1789 struct tls_context *tls_ctx = tls_get_ctx(sk);
1790 struct tls_prot_info *prot = &tls_ctx->prot_info;
1791 struct strp_msg *rxm;
1794 err = tls_decrypt_device(sk, msg, tls_ctx, darg);
1796 err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
1800 rxm = strp_msg(darg->skb);
1801 rxm->offset += prot->prepend_size;
1802 rxm->full_len -= prot->overhead_size;
1803 tls_advance_record_sn(sk, prot, &tls_ctx->rx);
1808 int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
1810 struct tls_decrypt_arg darg = { .zc = true, };
1812 return tls_decrypt_sg(sk, NULL, sgout, &darg);
1815 static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
1821 *control = tlm->control;
1825 err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
1826 sizeof(*control), control);
1827 if (*control != TLS_RECORD_TYPE_DATA) {
1828 if (err || msg->msg_flags & MSG_CTRUNC)
1831 } else if (*control != tlm->control) {
1838 static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
1840 tls_strp_msg_done(&ctx->strp);
1843 /* This function traverses the rx_list in the TLS receive context and copies
1844 * the decrypted records into the buffer provided by the caller when zero
1845 * copy is not in use. Further, records are removed from the rx_list if this
1846 * is not a peek case and the record has been consumed completely.
1848 static int process_rx_list(struct tls_sw_context_rx *ctx,
1855 struct sk_buff *skb = skb_peek(&ctx->rx_list);
1856 struct tls_msg *tlm;
1860 while (skip && skb) {
1861 struct strp_msg *rxm = strp_msg(skb);
1864 err = tls_record_content_type(msg, tlm, control);
1868 if (skip < rxm->full_len)
1871 skip = skip - rxm->full_len;
1872 skb = skb_peek_next(skb, &ctx->rx_list);
1875 while (len && skb) {
1876 struct sk_buff *next_skb;
1877 struct strp_msg *rxm = strp_msg(skb);
1878 int chunk = min_t(unsigned int, rxm->full_len - skip, len);
1882 err = tls_record_content_type(msg, tlm, control);
1886 err = skb_copy_datagram_msg(skb, rxm->offset + skip,
1892 copied = copied + chunk;
1894 /* Consume the data from the record if it is not a peek case */
1896 rxm->offset = rxm->offset + chunk;
1897 rxm->full_len = rxm->full_len - chunk;
1899 /* Return if there is unconsumed data in the record */
1900 if (rxm->full_len - skip)
1904 /* The remaining skip-bytes must lie in 1st record in rx_list.
1905 * So from the 2nd record, 'skip' should be 0.
1910 msg->msg_flags |= MSG_EOR;
1912 next_skb = skb_peek_next(skb, &ctx->rx_list);
1915 __skb_unlink(skb, &ctx->rx_list);
1924 return copied ? : err;
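/* During long reads, periodically let the socket backlog be processed so that
 * incoming TCP segments keep flowing; returns true if the backlog was flushed.
 */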
1928 tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
1929 size_t len_left, size_t decrypted, ssize_t done,
1934 if (len_left <= decrypted)
1937 max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
1938 if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
1942 return sk_flush_backlog(sk);
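/* Take the rx reader "lock": only one reader may run the receive path at a
 * time, other readers sleep on ctx->wq until reader_present is cleared.
 */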
1945 static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
1953 timeo = sock_rcvtimeo(sk, nonblock);
1955 while (unlikely(ctx->reader_present)) {
1956 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1958 ctx->reader_contended = 1;
1960 add_wait_queue(&ctx->wq, &wait);
1961 sk_wait_event(sk, &timeo,
1962 !READ_ONCE(ctx->reader_present), &wait);
1963 remove_wait_queue(&ctx->wq, &wait);
1969 if (signal_pending(current)) {
1970 err = sock_intr_errno(timeo);
1975 WRITE_ONCE(ctx->reader_present, 1);
1984 static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
1986 if (unlikely(ctx->reader_contended)) {
1987 if (wq_has_sleeper(&ctx->wq))
1990 ctx->reader_contended = 0;
1992 WARN_ON_ONCE(!ctx->reader_present);
1995 WRITE_ONCE(ctx->reader_present, 0);
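/* recvmsg() for a TLS_SW receive socket: first drain already-decrypted
 * records from rx_list, then decrypt fresh records (zero-copy into the user
 * buffer when permitted) until the request is satisfied.
 */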
1999 int tls_sw_recvmsg(struct sock *sk,
2005 struct tls_context *tls_ctx = tls_get_ctx(sk);
2006 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2007 struct tls_prot_info *prot = &tls_ctx->prot_info;
2008 ssize_t decrypted = 0, async_copy_bytes = 0;
2009 struct sk_psock *psock;
2010 unsigned char control = 0;
2011 size_t flushed_at = 0;
2012 struct strp_msg *rxm;
2013 struct tls_msg *tlm;
2017 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
2018 bool is_peek = flags & MSG_PEEK;
2019 bool released = true;
2020 bool bpf_strp_enabled;
2023 if (unlikely(flags & MSG_ERRQUEUE))
2024 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
2026 psock = sk_psock_get(sk);
2027 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
2030 bpf_strp_enabled = sk_psock_strp_enabled(psock);
2032 /* If crypto failed the connection is broken */
2033 err = ctx->async_wait.err;
2037 /* Process pending decrypted records. It must be non-zero-copy */
2038 err = process_rx_list(ctx, msg, &control, 0, len, is_peek);
2046 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2049 zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
2052 while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
2053 struct tls_decrypt_arg darg;
2054 int to_decrypt, chunk;
2056 err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
2060 chunk = sk_msg_recvmsg(sk, psock, msg, len,
2071 memset(&darg.inargs, 0, sizeof(darg.inargs));
2073 rxm = strp_msg(tls_strp_msg(ctx));
2074 tlm = tls_msg(tls_strp_msg(ctx));
2076 to_decrypt = rxm->full_len - prot->overhead_size;
2078 if (zc_capable && to_decrypt <= len &&
2079 tlm->control == TLS_RECORD_TYPE_DATA)
2082 /* Do not use async mode if record is non-data */
2083 if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
2084 darg.async = ctx->async_capable;
2088 err = tls_rx_one_record(sk, msg, &darg);
2090 tls_err_abort(sk, -EBADMSG);
2094 async |= darg.async;
2096 /* If the type of records being processed is not known yet,
2097 * set it to the record type just dequeued. If it is already known,
2098 * but does not match the record type just dequeued, go to end.
2099 * We always get the record type here since for TLS 1.2 the record type
2100 * is known right after the record is dequeued from the stream parser.
2101 * For TLS 1.3, we disable async.
2103 err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
2105 DEBUG_NET_WARN_ON_ONCE(darg.zc);
2106 tls_rx_rec_done(ctx);
2108 __skb_queue_tail(&ctx->rx_list, darg.skb);
2112 /* periodically flush backlog, and feed strparser */
2113 released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
2117 /* TLS 1.3 may have updated the length by more than overhead */
2118 rxm = strp_msg(darg.skb);
2119 chunk = rxm->full_len;
2120 tls_rx_rec_done(ctx);
2123 bool partially_consumed = chunk > len;
2124 struct sk_buff *skb = darg.skb;
2126 DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);
2129 /* TLS 1.2-only, to_decrypt must be text len */
2130 chunk = min_t(int, to_decrypt, len);
2131 async_copy_bytes += chunk;
2135 __skb_queue_tail(&ctx->rx_list, skb);
2139 if (bpf_strp_enabled) {
2141 err = sk_psock_tls_strp_read(psock, skb);
2142 if (err != __SK_PASS) {
2143 rxm->offset = rxm->offset + rxm->full_len;
2145 if (err == __SK_DROP)
2151 if (partially_consumed)
2154 err = skb_copy_datagram_msg(skb, rxm->offset,
2157 goto put_on_rx_list_err;
2160 goto put_on_rx_list;
2162 if (partially_consumed) {
2163 rxm->offset += chunk;
2164 rxm->full_len -= chunk;
2165 goto put_on_rx_list;
2174 /* Return full control message to userspace before trying
2175 * to parse another message type
2177 msg->msg_flags |= MSG_EOR;
2178 if (control != TLS_RECORD_TYPE_DATA)
2186 /* Wait for all previously submitted records to be decrypted */
2187 spin_lock_bh(&ctx->decrypt_compl_lock);
2188 reinit_completion(&ctx->async_wait.completion);
2189 pending = atomic_read(&ctx->decrypt_pending);
2190 spin_unlock_bh(&ctx->decrypt_compl_lock);
2193 ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2194 __skb_queue_purge(&ctx->async_hold);
2197 if (err >= 0 || err == -EINPROGRESS)
2203 /* Drain records from the rx_list & copy if required */
2204 if (is_peek || is_kvec)
2205 err = process_rx_list(ctx, msg, &control, copied,
2206 decrypted, is_peek);
2208 err = process_rx_list(ctx, msg, &control, 0,
2209 async_copy_bytes, is_peek);
2210 decrypted += max(err, 0);
2213 copied += decrypted;
2216 tls_rx_reader_unlock(sk, ctx);
2218 sk_psock_put(sk, psock);
2219 return copied ? : err;
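/* splice_read(): decrypt one record at a time and splice its payload into the
 * pipe; non-data records are requeued on rx_list and the read stops.
 */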
2222 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
2223 struct pipe_inode_info *pipe,
2224 size_t len, unsigned int flags)
2226 struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
2227 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2228 struct strp_msg *rxm = NULL;
2229 struct sock *sk = sock->sk;
2230 struct tls_msg *tlm;
2231 struct sk_buff *skb;
2236 err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
2240 if (!skb_queue_empty(&ctx->rx_list)) {
2241 skb = __skb_dequeue(&ctx->rx_list);
2243 struct tls_decrypt_arg darg;
2245 err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
2248 goto splice_read_end;
2250 memset(&darg.inargs, 0, sizeof(darg.inargs));
2252 err = tls_rx_one_record(sk, NULL, &darg);
2254 tls_err_abort(sk, -EBADMSG);
2255 goto splice_read_end;
2258 tls_rx_rec_done(ctx);
2262 rxm = strp_msg(skb);
2265 /* splice does not support reading control messages */
2266 if (tlm->control != TLS_RECORD_TYPE_DATA) {
2268 goto splice_requeue;
2271 chunk = min_t(unsigned int, rxm->full_len, len);
2272 copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
2274 goto splice_requeue;
2276 if (chunk < rxm->full_len) {
2278 rxm->full_len -= len;
2279 goto splice_requeue;
2285 tls_rx_reader_unlock(sk, ctx);
2286 return copied ? : err;
2289 __skb_queue_head(&ctx->rx_list, skb);
2290 goto splice_read_end;
2293 bool tls_sw_sock_is_readable(struct sock *sk)
2295 struct tls_context *tls_ctx = tls_get_ctx(sk);
2296 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2297 bool ingress_empty = true;
2298 struct sk_psock *psock;
2301 psock = sk_psock(sk);
2303 ingress_empty = list_empty(&psock->ingress_msg);
2306 return !ingress_empty || tls_strp_msg_ready(ctx) ||
2307 !skb_queue_empty(&ctx->rx_list);
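/* strparser callback: parse and validate the 5-byte TLS record header and
 * return the length of the full record, or a negative error which aborts the
 * socket.
 */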
2310 int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
2312 struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
2313 struct tls_prot_info *prot = &tls_ctx->prot_info;
2314 char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
2315 size_t cipher_overhead;
2316 size_t data_len = 0;
2319 /* Verify that we have a full TLS header, or wait for more data */
2320 if (strp->stm.offset + prot->prepend_size > skb->len)
2323 /* Sanity-check size of on-stack buffer. */
2324 if (WARN_ON(prot->prepend_size > sizeof(header))) {
2329 /* Linearize header to local buffer */
2330 ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
2334 strp->mark = header[0];
2336 data_len = ((header[4] & 0xFF) | (header[3] << 8));
2338 cipher_overhead = prot->tag_size;
2339 if (prot->version != TLS_1_3_VERSION &&
2340 prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
2341 cipher_overhead += prot->iv_size;
2343 if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
2348 if (data_len < cipher_overhead) {
2353 /* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
2354 if (header[1] != TLS_1_2_VERSION_MINOR ||
2355 header[2] != TLS_1_2_VERSION_MAJOR) {
2360 tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
2361 TCP_SKB_CB(skb)->seq + strp->stm.offset);
2362 return data_len + TLS_HEADER_SIZE;
2365 tls_err_abort(strp->sk, ret);
2370 void tls_rx_msg_ready(struct tls_strparser *strp)
2372 struct tls_sw_context_rx *ctx;
2374 ctx = container_of(strp, struct tls_sw_context_rx, strp);
2375 ctx->saved_data_ready(strp->sk);
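/* Replacement for sk->sk_data_ready: feed newly arrived TCP data to the TLS
 * strparser (using atomic allocations) and forward the wakeup when a psock
 * has ingress messages queued.
 */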
2378 static void tls_data_ready(struct sock *sk)
2380 struct tls_context *tls_ctx = tls_get_ctx(sk);
2381 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2382 struct sk_psock *psock;
2385 trace_sk_data_ready(sk);
2387 alloc_save = sk->sk_allocation;
2388 sk->sk_allocation = GFP_ATOMIC;
2389 tls_strp_data_ready(&ctx->strp);
2390 sk->sk_allocation = alloc_save;
2392 psock = sk_psock_get(sk);
2394 if (!list_empty(&psock->ingress_msg))
2395 ctx->saved_data_ready(sk);
2396 sk_psock_put(sk, psock);
2400 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
2402 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2404 set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
2405 set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
2406 cancel_delayed_work_sync(&ctx->tx_work.work);
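/* Tear down the TX side: wait for in-flight async encryptions, push what can
 * still be sent, then free the remaining records on tx_list and the AEAD
 * transform.
 */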
2409 void tls_sw_release_resources_tx(struct sock *sk)
2411 struct tls_context *tls_ctx = tls_get_ctx(sk);
2412 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2413 struct tls_rec *rec, *tmp;
2416 /* Wait for any pending async encryptions to complete */
2417 spin_lock_bh(&ctx->encrypt_compl_lock);
2418 ctx->async_notify = true;
2419 pending = atomic_read(&ctx->encrypt_pending);
2420 spin_unlock_bh(&ctx->encrypt_compl_lock);
2423 crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
2425 tls_tx_records(sk, -1);
2427 /* Free up un-sent records in tx_list. First, free
2428 * the partially sent record if any at head of tx_list.
2430 if (tls_ctx->partially_sent_record) {
2431 tls_free_partial_record(sk, tls_ctx);
2432 rec = list_first_entry(&ctx->tx_list,
2433 struct tls_rec, list);
2434 list_del(&rec->list);
2435 sk_msg_free(sk, &rec->msg_plaintext);
2439 list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
2440 list_del(&rec->list);
2441 sk_msg_free(sk, &rec->msg_encrypted);
2442 sk_msg_free(sk, &rec->msg_plaintext);
2446 crypto_free_aead(ctx->aead_send);
2447 tls_free_open_rec(sk);
2450 void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
2452 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
2457 void tls_sw_release_resources_rx(struct sock *sk)
2459 struct tls_context *tls_ctx = tls_get_ctx(sk);
2460 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2462 kfree(tls_ctx->rx.rec_seq);
2463 kfree(tls_ctx->rx.iv);
2465 if (ctx->aead_recv) {
2466 __skb_queue_purge(&ctx->rx_list);
2467 crypto_free_aead(ctx->aead_recv);
2468 tls_strp_stop(&ctx->strp);
2469 /* If tls_sw_strparser_arm() was not called (cleanup paths)
2470 * we still want to tls_strp_stop(), but sk->sk_data_ready was
2471 * never swapped.
2472 */
2473 if (ctx->saved_data_ready) {
2474 write_lock_bh(&sk->sk_callback_lock);
2475 sk->sk_data_ready = ctx->saved_data_ready;
2476 write_unlock_bh(&sk->sk_callback_lock);
2481 void tls_sw_strparser_done(struct tls_context *tls_ctx)
2483 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2485 tls_strp_done(&ctx->strp);
2488 void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
2490 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
2495 void tls_sw_free_resources_rx(struct sock *sk)
2497 struct tls_context *tls_ctx = tls_get_ctx(sk);
2499 tls_sw_release_resources_rx(sk);
2500 tls_sw_free_ctx_rx(tls_ctx);
2503 /* The work handler to transmit the encrypted records in tx_list */
2504 static void tx_work_handler(struct work_struct *work)
2506 struct delayed_work *delayed_work = to_delayed_work(work);
2507 struct tx_work *tx_work = container_of(delayed_work,
2508 struct tx_work, work);
2509 struct sock *sk = tx_work->sk;
2510 struct tls_context *tls_ctx = tls_get_ctx(sk);
2511 struct tls_sw_context_tx *ctx;
2513 if (unlikely(!tls_ctx))
2516 ctx = tls_sw_ctx_tx(tls_ctx);
2517 if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
2520 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2523 if (mutex_trylock(&tls_ctx->tx_lock)) {
2525 tls_tx_records(sk, -1);
2527 mutex_unlock(&tls_ctx->tx_lock);
2528 } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
2529 /* Someone is holding the tx_lock, they will likely run Tx
2530 * and cancel the work on their way out of the lock section.
2531 * Schedule a long delay just in case.
2533 schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
2537 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
2539 struct tls_rec *rec;
2541 rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
2545 return READ_ONCE(rec->tx_ready);
2548 void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2550 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2552 /* Schedule the transmission if tx list is ready */
2553 if (tls_is_tx_ready(tx_ctx) &&
2554 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2555 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2558 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
2560 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2562 write_lock_bh(&sk->sk_callback_lock);
2563 rx_ctx->saved_data_ready = sk->sk_data_ready;
2564 sk->sk_data_ready = tls_data_ready;
2565 write_unlock_bh(&sk->sk_callback_lock);
2568 void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
2570 struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
2572 rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
2573 tls_ctx->prot_info.version != TLS_1_3_VERSION;
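/* Configure software TLS for one direction (tx != 0 selects the transmit
 * path): allocate the per-direction context, derive the protocol sizes from
 * the negotiated cipher, copy the IV/salt and record sequence, and allocate
 * and key the AEAD transform.
 */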
2576 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
2578 struct tls_context *tls_ctx = tls_get_ctx(sk);
2579 struct tls_prot_info *prot = &tls_ctx->prot_info;
2580 struct tls_crypto_info *crypto_info;
2581 struct tls_sw_context_tx *sw_ctx_tx = NULL;
2582 struct tls_sw_context_rx *sw_ctx_rx = NULL;
2583 struct cipher_context *cctx;
2584 struct crypto_aead **aead;
2585 u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
2586 struct crypto_tfm *tfm;
2587 char *iv, *rec_seq, *key, *salt, *cipher_name;
2597 if (!ctx->priv_ctx_tx) {
2598 sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
2603 ctx->priv_ctx_tx = sw_ctx_tx;
2606 (struct tls_sw_context_tx *)ctx->priv_ctx_tx;
2609 if (!ctx->priv_ctx_rx) {
2610 sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
2615 ctx->priv_ctx_rx = sw_ctx_rx;
2618 (struct tls_sw_context_rx *)ctx->priv_ctx_rx;
2623 crypto_init_wait(&sw_ctx_tx->async_wait);
2624 spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
2625 crypto_info = &ctx->crypto_send.info;
2627 aead = &sw_ctx_tx->aead_send;
2628 INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
2629 INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
2630 sw_ctx_tx->tx_work.sk = sk;
2632 crypto_init_wait(&sw_ctx_rx->async_wait);
2633 spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
2634 init_waitqueue_head(&sw_ctx_rx->wq);
2635 crypto_info = &ctx->crypto_recv.info;
2637 skb_queue_head_init(&sw_ctx_rx->rx_list);
2638 skb_queue_head_init(&sw_ctx_rx->async_hold);
2639 aead = &sw_ctx_rx->aead_recv;
2642 switch (crypto_info->cipher_type) {
2643 case TLS_CIPHER_AES_GCM_128: {
2644 struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
2646 gcm_128_info = (void *)crypto_info;
2647 nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2648 tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
2649 iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
2650 iv = gcm_128_info->iv;
2651 rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
2652 rec_seq = gcm_128_info->rec_seq;
2653 keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
2654 key = gcm_128_info->key;
2655 salt = gcm_128_info->salt;
2656 salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
2657 cipher_name = "gcm(aes)";
2660 case TLS_CIPHER_AES_GCM_256: {
2661 struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
2663 gcm_256_info = (void *)crypto_info;
2664 nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2665 tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
2666 iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
2667 iv = gcm_256_info->iv;
2668 rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
2669 rec_seq = gcm_256_info->rec_seq;
2670 keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
2671 key = gcm_256_info->key;
2672 salt = gcm_256_info->salt;
2673 salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
2674 cipher_name = "gcm(aes)";
2677 case TLS_CIPHER_AES_CCM_128: {
2678 struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
2680 ccm_128_info = (void *)crypto_info;
2681 nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2682 tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
2683 iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
2684 iv = ccm_128_info->iv;
2685 rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
2686 rec_seq = ccm_128_info->rec_seq;
2687 keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
2688 key = ccm_128_info->key;
2689 salt = ccm_128_info->salt;
2690 salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
2691 cipher_name = "ccm(aes)";
2694 case TLS_CIPHER_CHACHA20_POLY1305: {
2695 struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
2697 chacha20_poly1305_info = (void *)crypto_info;
2699 tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
2700 iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
2701 iv = chacha20_poly1305_info->iv;
2702 rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
2703 rec_seq = chacha20_poly1305_info->rec_seq;
2704 keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
2705 key = chacha20_poly1305_info->key;
2706 salt = chacha20_poly1305_info->salt;
2707 salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
2708 cipher_name = "rfc7539(chacha20,poly1305)";
2711 case TLS_CIPHER_SM4_GCM: {
2712 struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
2714 sm4_gcm_info = (void *)crypto_info;
2715 nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2716 tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
2717 iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
2718 iv = sm4_gcm_info->iv;
2719 rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
2720 rec_seq = sm4_gcm_info->rec_seq;
2721 keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
2722 key = sm4_gcm_info->key;
2723 salt = sm4_gcm_info->salt;
2724 salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
2725 cipher_name = "gcm(sm4)";
2728 case TLS_CIPHER_SM4_CCM: {
2729 struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
2731 sm4_ccm_info = (void *)crypto_info;
2732 nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2733 tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
2734 iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
2735 iv = sm4_ccm_info->iv;
2736 rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
2737 rec_seq = sm4_ccm_info->rec_seq;
2738 keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
2739 key = sm4_ccm_info->key;
2740 salt = sm4_ccm_info->salt;
2741 salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
2742 cipher_name = "ccm(sm4)";
2745 case TLS_CIPHER_ARIA_GCM_128: {
2746 struct tls12_crypto_info_aria_gcm_128 *aria_gcm_128_info;
2748 aria_gcm_128_info = (void *)crypto_info;
2749 nonce_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
2750 tag_size = TLS_CIPHER_ARIA_GCM_128_TAG_SIZE;
2751 iv_size = TLS_CIPHER_ARIA_GCM_128_IV_SIZE;
2752 iv = aria_gcm_128_info->iv;
2753 rec_seq_size = TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE;
2754 rec_seq = aria_gcm_128_info->rec_seq;
2755 keysize = TLS_CIPHER_ARIA_GCM_128_KEY_SIZE;
2756 key = aria_gcm_128_info->key;
2757 salt = aria_gcm_128_info->salt;
2758 salt_size = TLS_CIPHER_ARIA_GCM_128_SALT_SIZE;
2759 cipher_name = "gcm(aria)";
2762 case TLS_CIPHER_ARIA_GCM_256: {
2763 struct tls12_crypto_info_aria_gcm_256 *gcm_256_info;
2765 gcm_256_info = (void *)crypto_info;
2766 nonce_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
2767 tag_size = TLS_CIPHER_ARIA_GCM_256_TAG_SIZE;
2768 iv_size = TLS_CIPHER_ARIA_GCM_256_IV_SIZE;
2769 iv = gcm_256_info->iv;
2770 rec_seq_size = TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE;
2771 rec_seq = gcm_256_info->rec_seq;
2772 keysize = TLS_CIPHER_ARIA_GCM_256_KEY_SIZE;
2773 key = gcm_256_info->key;
2774 salt = gcm_256_info->salt;
2775 salt_size = TLS_CIPHER_ARIA_GCM_256_SALT_SIZE;
2776 cipher_name = "gcm(aria)";
2784 if (crypto_info->version == TLS_1_3_VERSION) {
2786 prot->aad_size = TLS_HEADER_SIZE;
2787 prot->tail_size = 1;
2789 prot->aad_size = TLS_AAD_SPACE_SIZE;
2790 prot->tail_size = 0;
2793 /* Sanity-check the sizes for stack allocations. */
2794 if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
2795 rec_seq_size > TLS_MAX_REC_SEQ_SIZE || tag_size != TLS_TAG_SIZE ||
2796 prot->aad_size > TLS_MAX_AAD_SIZE) {
2801 prot->version = crypto_info->version;
2802 prot->cipher_type = crypto_info->cipher_type;
2803 prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
2804 prot->tag_size = tag_size;
2805 prot->overhead_size = prot->prepend_size +
2806 prot->tag_size + prot->tail_size;
2807 prot->iv_size = iv_size;
2808 prot->salt_size = salt_size;
2809 cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
2814 /* Note: 128 & 256 bit salt are the same size */
2815 prot->rec_seq_size = rec_seq_size;
2816 memcpy(cctx->iv, salt, salt_size);
2817 memcpy(cctx->iv + salt_size, iv, iv_size);
2818 cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
2819 if (!cctx->rec_seq) {
2825 *aead = crypto_alloc_aead(cipher_name, 0, 0);
2826 if (IS_ERR(*aead)) {
2827 rc = PTR_ERR(*aead);
2833 ctx->push_pending_record = tls_sw_push_pending_record;
2835 rc = crypto_aead_setkey(*aead, key, keysize);
2840 rc = crypto_aead_setauthsize(*aead, prot->tag_size);
2845 tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
2847 tls_update_rx_zc_capable(ctx);
2848 sw_ctx_rx->async_capable =
2849 crypto_info->version != TLS_1_3_VERSION &&
2850 !!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);
2852 rc = tls_strp_init(&sw_ctx_rx->strp, sk);
2860 crypto_free_aead(*aead);
2863 kfree(cctx->rec_seq);
2864 cctx->rec_seq = NULL;
2870 kfree(ctx->priv_ctx_tx);
2871 ctx->priv_ctx_tx = NULL;
2873 kfree(ctx->priv_ctx_rx);
2874 ctx->priv_ctx_rx = NULL;