/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _TLS_OFFLOAD_H
#define _TLS_OFFLOAD_H
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/crypto.h>
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
#include <uapi/linux/tls.h>

struct tls_rec;
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

#define TLS_HEADER_SIZE			5
#define TLS_NONCE_OFFSET		TLS_HEADER_SIZE

#define TLS_CRYPTO_INFO_READY(info)	((info)->cipher_type)

#define TLS_RECORD_TYPE_DATA		0x17

#define TLS_AAD_SPACE_SIZE		13

#define MAX_IV_SIZE			16
#define TLS_TAG_SIZE			16
#define TLS_MAX_REC_SEQ_SIZE		8
#define TLS_MAX_AAD_SIZE		TLS_AAD_SPACE_SIZE
/* For CCM mode, the full 16 bytes of IV are made of four fields of the
 * following sizes:
 *
 * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
 *
 * The field 'length' is encoded in field 'b0' as '(length width - 1)'.
 * Hence b0 contains (3 - 1) = 2.
 */
#define TLS_AES_CCM_IV_B0_BYTE		2
#define TLS_SM4_CCM_IV_B0_BYTE		2
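
/* Example (illustrative sketch, not part of this header): building the
 * 16-byte CCM IV laid out above, assuming 'salt' holds the 4-byte implicit
 * nonce and 'rec_seq' the 8-byte explicit nonce:
 *
 *	u8 iv[MAX_IV_SIZE];
 *
 *	iv[0] = TLS_AES_CCM_IV_B0_BYTE;
 *	memcpy(iv + 1, salt, 4);
 *	memcpy(iv + 5, rec_seq, 8);
 *
 * The trailing three 'length' bytes are filled in by the CCM implementation
 * itself.
 */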
enum {
	TLS_BASE,
	TLS_SW,
	TLS_HW,
	TLS_HW_RECORD,
	TLS_NUM_CONFIG,
};

struct tx_work {
	struct delayed_work work;
	struct sock *sk;
};

struct tls_sw_context_tx {
	struct crypto_aead *aead_send;
	struct crypto_wait async_wait;
	struct tx_work tx_work;
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
#define BIT_TX_CLOSING		1
	unsigned long tx_bitmask;
};
struct tls_strparser {
	struct sock *sk;

	u32 mark : 8;
	u32 stopped : 1;
	u32 copy_mode : 1;
	u32 msg_ready : 1;

	struct strp_msg stm;

	struct sk_buff *anchor;
	struct work_struct work;
};
struct tls_sw_context_rx {
	struct crypto_aead *aead_recv;
	struct crypto_wait async_wait;
	struct sk_buff_head rx_list;	/* list of decrypted 'data' records */
	void (*saved_data_ready)(struct sock *sk);
	u8 reader_present;

	u8 async_capable:1;
	u8 zc_capable:1;
	u8 reader_contended:1;

	struct tls_strparser strp;

	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending */
	spinlock_t decrypt_compl_lock;
	struct sk_buff_head async_hold;
	struct wait_queue_head wq;
};
struct tls_record_info {
	struct list_head list;
	u32 end_seq;
	int len;
	int num_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};
struct tls_offload_context_tx {
	struct crypto_aead *aead_send;
	spinlock_t lock;	/* protects records list */
	struct list_head records_list;
	struct tls_record_info *open_record;
	struct tls_record_info *retransmit_hint;
	u64 hint_record_sn;
	u64 unacked_record_sn;

	struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
	void (*sk_destruct)(struct sock *sk);
	struct work_struct destruct_work;
	struct tls_context *ctx;
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_TX	16
};

#define TLS_OFFLOAD_CONTEXT_SIZE_TX					\
	(sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
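
/* Example (hypothetical driver code; 'mydrv_tls_tx' is illustrative): a
 * driver keeps its per-connection TX state in the reserved driver_state
 * area and verifies at build time that it fits:
 *
 *	struct mydrv_tls_tx {
 *		u32 hw_flow_id;
 *		u32 expected_tcp_seq;
 *	};
 *
 *	static_assert(sizeof(struct mydrv_tls_tx) <= TLS_DRIVER_STATE_SIZE_TX);
 */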
enum tls_context_flags {
	/* tls_device_down was called after the netdev went down, device state
	 * was released, and kTLS works in software, even though rx_conf is
	 * still TLS_HW (needed for transition).
	 */
	TLS_RX_DEV_DEGRADED = 0,
	/* Unlike RX, where resync is driven entirely by the core, in TX only
	 * the driver knows when things went out of sync, so we need the flag
	 * to be atomic.
	 */
	TLS_TX_SYNC_SCHED = 1,
	/* tls_dev_del was called for the RX side, device state was released,
	 * but tls_ctx->netdev might still be kept, because TX-side driver
	 * resources might not be released yet. Used to prevent the second
	 * tls_dev_del call in tls_device_down if it happens simultaneously.
	 */
	TLS_RX_DEV_CLOSED = 2,
};
struct cipher_context {
	char *iv;
	char *rec_seq;
};
union tls_crypto_context {
	struct tls_crypto_info info;
	union {
		struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
		struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
		struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
		struct tls12_crypto_info_sm4_gcm sm4_gcm;
		struct tls12_crypto_info_sm4_ccm sm4_ccm;
	};
};
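
/* Example (sketch): consumers dispatch on the common 'info' header that
 * every member of the union starts with, e.g. when programming TX keys:
 *
 *	switch (ctx->crypto_send.info.cipher_type) {
 *	case TLS_CIPHER_AES_GCM_128:
 *		key = ctx->crypto_send.aes_gcm_128.key;
 *		break;
 *	case TLS_CIPHER_AES_GCM_256:
 *		key = ctx->crypto_send.aes_gcm_256.key;
 *		break;
 *	}
 */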
struct tls_prot_info {
	u16 version;
	u16 cipher_type;
	u16 prepend_size;
	u16 tag_size;
	u16 overhead_size;
	u16 iv_size;
	u16 salt_size;
	u16 rec_seq_size;
	u16 aad_size;
	u16 tail_size;
};
struct tls_context {
	/* read-only cache line */
	struct tls_prot_info prot_info;

	u8 tx_conf:3;
	u8 rx_conf:3;
	u8 zerocopy_sendfile:1;
	u8 rx_no_pad:1;

	int (*push_pending_record)(struct sock *sk, int flags);
	void (*sk_write_space)(struct sock *sk);

	void *priv_ctx_tx;
	void *priv_ctx_rx;

	struct net_device *netdev;

	/* rw cache line */
	struct cipher_context tx;
	struct cipher_context rx;

	struct scatterlist *partially_sent_record;
	u16 partially_sent_offset;

	bool in_tcp_sendpages;
	bool pending_open_record_frags;

	struct mutex tx_lock; /* protects partially_sent_* fields and
			       * per-type TX fields
			       */
	unsigned long flags;

	/* cache cold stuff */
	struct proto *sk_proto;
	struct sock *sk;

	void (*sk_destruct)(struct sock *sk);

	union tls_crypto_context crypto_send;
	union tls_crypto_context crypto_recv;

	struct list_head list;
	refcount_t refcount;
	struct rcu_head rcu;
};
enum tls_offload_ctx_dir {
	TLS_OFFLOAD_CTX_DIR_RX,
	TLS_OFFLOAD_CTX_DIR_TX,
};
struct tlsdev_ops {
	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
			   enum tls_offload_ctx_dir direction,
			   struct tls_crypto_info *crypto_info,
			   u32 start_offload_tcp_sn);
	void (*tls_dev_del)(struct net_device *netdev,
			    struct tls_context *ctx,
			    enum tls_offload_ctx_dir direction);
	int (*tls_dev_resync)(struct net_device *netdev,
			      struct sock *sk, u32 seq, u8 *rcd_sn,
			      enum tls_offload_ctx_dir direction);
};
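
/* Example (hypothetical driver; handler names are illustrative): a device
 * advertises kTLS offload by wiring these callbacks into its net_device:
 *
 *	static const struct tlsdev_ops mydrv_tls_ops = {
 *		.tls_dev_add	= mydrv_tls_add,
 *		.tls_dev_del	= mydrv_tls_del,
 *		.tls_dev_resync	= mydrv_tls_resync,
 *	};
 *
 *	netdev->tlsdev_ops = &mydrv_tls_ops;
 *	netdev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX;
 */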
enum tls_offload_sync_type {
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
	TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
	TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
};

#define TLS_DEVICE_RESYNC_NH_START_IVAL		2
#define TLS_DEVICE_RESYNC_NH_MAX_IVAL		128

#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX		13
struct tls_offload_resync_async {
	atomic64_t req;
	u16 loglen;
	u16 rcd_delta;
	u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};
struct tls_offload_context_rx {
	/* sw must be the first member of tls_offload_context_rx */
	struct tls_sw_context_rx sw;
	enum tls_offload_sync_type resync_type;
	/* this member is set regardless of resync_type, to avoid branches */
	u8 resync_nh_reset:1;
	/* CORE_NEXT_HINT-only member, but use the hole here */
	u8 resync_nh_do_now:1;
	union {
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
		struct {
			atomic64_t resync_req;
		};
		/* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
		struct {
			u32 decrypted_failed;
			u32 decrypted_tgt;
		} resync_nh;
		/* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
		struct {
			struct tls_offload_resync_async *resync_async;
		};
	};
	u8 driver_state[] __aligned(8);
	/* The TLS layer reserves room for driver specific state
	 * Currently the belief is that there is not enough
	 * driver specific state to justify another layer of indirection
	 */
#define TLS_DRIVER_STATE_SIZE_RX	8
};

#define TLS_OFFLOAD_CONTEXT_SIZE_RX					\
	(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
{
	return rec->len == 0;
}

static inline u32 tls_record_start_seq(struct tls_record_info *rec)
{
	return rec->end_seq - rec->len;
}
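
/* Example (sketch of a driver retransmit path; variable names are
 * illustrative): look up the record covering a retransmitted TCP sequence
 * number under the records-list lock; 'offload_ctx' would come from
 * tls_offload_ctx_tx():
 *
 *	struct tls_record_info *record;
 *	u64 record_sn;
 *
 *	spin_lock_irqsave(&offload_ctx->lock, flags);
 *	record = tls_get_record(offload_ctx, tcp_seq, &record_sn);
 *	if (record && !tls_record_is_start_marker(record))
 *		resume_from = tls_record_start_seq(record);
 *	spin_unlock_irqrestore(&offload_ctx->lock, flags);
 */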
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
		      struct sk_buff *skb);
struct sk_buff *
tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev,
			 struct sk_buff *skb);
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
	return sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
	       &tls_validate_xmit_skb);
#else
	return false;
#endif
}
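
/* Example (sketch): a driver's xmit path can use this check to route skbs
 * from TLS-offloaded sockets to its dedicated TX handling (the handler
 * name is hypothetical):
 *
 *	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
 *		return mydrv_tls_xmit(skb, netdev);
 */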
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Use RCU on icsk_ulp_data only for sock diag code,
	 * TLS data path doesn't need rcu_dereference().
	 */
	return (__force void *)icsk->icsk_ulp_data;
}
static inline struct tls_sw_context_rx *tls_sw_ctx_rx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_rx *)tls_ctx->priv_ctx_rx;
}

static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
		const struct tls_context *tls_ctx)
{
	return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}
static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}
static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_tx(ctx);
}

static inline bool tls_sw_has_ctx_rx(const struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!ctx)
		return false;
	return !!tls_sw_ctx_rx(ctx);
}
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
	return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
				     enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return tls_offload_ctx_tx(tls_ctx)->driver_state;
	else
		return tls_offload_ctx_rx(tls_ctx)->driver_state;
}
static inline void *
tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
{
	return __tls_driver_ctx(tls_get_ctx(sk), direction);
}
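
/* Example (continuing the hypothetical 'mydrv_tls_tx' sketch above):
 * fetching the driver-private area of an offloaded TX connection:
 *
 *	struct mydrv_tls_tx *priv;
 *
 *	priv = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_TX);
 *	priv->expected_tcp_seq = start_offload_tcp_sn;
 */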
#define RESYNC_REQ BIT(0)
#define RESYNC_REQ_ASYNC BIT(1)
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
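
/* Example (sketch of a driver RX completion path): when the device reports
 * loss of sync, hand the TCP sequence of a suspected record header to the
 * core ('hdr_seq' is a hypothetical __be32 value from the device):
 *
 *	if (tls_is_sk_rx_device_offloaded(sk))
 *		tls_offload_rx_resync_request(sk, hdr_seq);
 */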
/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
		     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
	rx_ctx->resync_async->loglen = 0;
	rx_ctx->resync_async->rcd_delta = 0;
}
static inline void
tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

	atomic64_set(&rx_ctx->resync_async->req,
		     ((u64)ntohl(seq) << 32) | RESYNC_REQ);
}
static inline void
tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}
/* Driver's seq tracking has to be disabled until resync succeeds */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	bool ret;

	ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
	smp_mb__after_atomic();
	return ret;
}
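
/* Example (sketch): while a TX resync is still pending, a driver must not
 * trust its hardware sequence tracking and typically falls back to the
 * software encrypt path ('mydrv_tls_sw_fallback' is hypothetical):
 *
 *	if (unlikely(tls_offload_tx_resync_pending(sk)))
 *		return mydrv_tls_sw_fallback(skb, netdev);
 */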
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
#ifdef CONFIG_TLS_DEVICE
void tls_device_sk_destruct(struct sock *sk);
void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
{
	if (!sk_fullsock(sk) ||
	    smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
		return false;
	return tls_get_ctx(sk)->rx_conf == TLS_HW;
}
#endif /* CONFIG_TLS_DEVICE */
#endif /* _TLS_OFFLOAD_H */