/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

struct tls_rec;

struct tls_cipher_size_desc {
	unsigned int iv;
	unsigned int key;
	unsigned int salt;
	unsigned int tag;
	unsigned int rec_seq;
};

extern const struct tls_cipher_size_desc tls_cipher_size_desc[];

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_ALERT		0x15
#define TLS_RECORD_TYPE_HANDSHAKE	0x16
#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_TAG_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8
#define TLS_MAX_AAD_SIZE		TLS_AAD_SPACE_SIZE

/* For CCM mode, the full 16 bytes of IV are made of '4' fields of given sizes.
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The field 'length' is encoded in field 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
#define TLS_SM4_CCM_IV_B0_BYTE		2

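/* Illustrative sketch (not part of the kernel API): assembling the IV
 * layout described above for AES-128-CCM; 'salt' and 'rec_seq' are assumed
 * to point at the connection's key material.
 *
 *	u8 iv[16];
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;	(b0: length width - 1)
 *	memcpy(iv + 1, salt, 4);	(implicit nonce)
 *	memcpy(iv + 5, rec_seq, 8);	(explicit nonce)
 *
 * The trailing 3 length bytes are filled in by the CCM implementation
 * itself.
 */
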
enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};

struct tls_strparser {
	struct sock *sk;

	u32 mark : 8;
	u32 stopped : 1;
	u32 copy_mode : 1;
	u32 mixed_decrypted : 1;
	u32 msg_ready : 1;

	struct strp_msg stm;

	struct sk_buff *anchor;
	struct work_struct work;
};

struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);

	u8 reader_present;
	u8 async_capable:1;
	u8 zc_capable:1;
	u8 reader_contended:1;

	struct tls_strparser strp;

	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	struct sk_buff_head async_hold;
	struct wait_queue_head wq;
};

struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	struct work_struct destruct_work;
	struct tls_context *ctx;

	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)

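/* Usage sketch (an assumption about the allocation pattern, mirroring
 * net/tls/tls_device.c): the context and the driver scratch area are
 * allocated together, so the flexible driver_state[] array then provides
 * TLS_DRIVER_STATE_SIZE_TX bytes to the offloading driver:
 *
 *	struct tls_offload_context_tx *offload_ctx;
 *
 *	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
 */
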
enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};

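/* These flags live in the 'flags' word of struct tls_context below and are
 * manipulated with the regular atomic bitops, e.g. (illustrative):
 *
 *	set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
 *	if (test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))
 *		...
 */
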
struct cipher_context {
	char *iv;
	char *rec_seq;
};

union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};

struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};

struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;
	u8 zerocopy_sendfile:1;
	u8 rx_no_pad:1;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device __rcu *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};

enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};

struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};

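/* Sketch of how a NIC driver might hook these up (the mydrv_* names are
 * hypothetical; real implementations live in drivers such as mlx5):
 *
 *	static const struct tlsdev_ops mydrv_tls_ops = {
 *		.tls_dev_add	= mydrv_tls_add,
 *		.tls_dev_del	= mydrv_tls_del,
 *		.tls_dev_resync	= mydrv_tls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tls_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */
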
enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL			2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL			128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX			13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};

struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver-specific state.
	 * Currently the belief is that there is not enough driver-specific
	 * state to justify another layer of indirection.
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)

struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);

static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}

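/* Illustrative driver-side retransmit lookup (a sketch, not a prescribed
 * call sequence; tx_ctx and tcp_seq are assumed locals). records_list is
 * protected by tx_ctx->lock, hence the locking:
 *
 *	struct tls_record_info *rec;
 *	unsigned long flags;
 *	u64 rcd_sn;
 *
 *	spin_lock_irqsave(&tx_ctx->lock, flags);
 *	rec = tls_get_record(tx_ctx, tcp_seq, &rcd_sn);
 *	if (rec && !tls_record_is_start_marker(rec))
 *		... re-encrypt starting at tls_record_start_seq(rec) ...
 *	spin_unlock_irqrestore(&tx_ctx->lock, flags);
 */
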
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);

static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{
#ifdef CONFIG_TLS_DEVICE
	struct sock *sk = skb->sk;

	return sk && sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}

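/* Typical use in a driver's TX path (sketch; the mydrv_* names are
 * hypothetical):
 *
 *	static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb,
 *					    struct net_device *dev)
 *	{
 *		if (tls_is_skb_tx_device_offloaded(skb))
 *			return mydrv_tls_xmit(skb, dev);
 *		...
 *	}
 */
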
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}

static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}

static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}

static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}

static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}

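/* A driver can overlay its own per-connection structure on the reserved
 * driver_state area, e.g. (hypothetical; it must fit within
 * TLS_DRIVER_STATE_SIZE_TX / TLS_DRIVER_STATE_SIZE_RX):
 *
 *	struct mydrv_tls_state {
 *		u32 stream_id;
 *	};
 *
 *	struct mydrv_tls_state *st =
 *		tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 */
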
#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

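/* Sketch: a driver using TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ would call this
 * from its RX path when the device reports the TCP sequence at which it
 * believes a TLS record header starts, e.g.:
 *
 *	tls_offload_rx_resync_request(sk, cqe->tls_rcd_tcp_seq);
 *
 * (the cqe field name here is hypothetical.)
 */
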
/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}

static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}

static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}

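/* Typically called by the driver from its .tls_dev_add() handler for the
 * RX direction, e.g. (illustrative):
 *
 *	tls_offload_rx_resync_set_type(sk,
 *			TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);
 */
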
/* Driver's seq tracking has to be disabled until resync has succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}

struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);

#ifdef CONFIG_TLS_DEVICE
void tls_device_sk_destruct(struct sock *sk);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);

static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#endif /* CONFIG_TLS_DEVICE */

#endif /* _TLS_OFFLOAD_H */