// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/inet6_hashtables.h>
#include <net/tls.h>

#include "../ccm.h"
#include "../nfp_net.h"
#include "crypto.h"
#include "fw.h"

#define NFP_NET_TLS_CCM_MBOX_OPS_MASK		\
	(BIT(NFP_CCM_TYPE_CRYPTO_RESET) |	\
	 BIT(NFP_CCM_TYPE_CRYPTO_ADD) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_DEL) |		\
	 BIT(NFP_CCM_TYPE_CRYPTO_UPDATE))

#define NFP_NET_TLS_OPCODE_MASK_RX	\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC)

#define NFP_NET_TLS_OPCODE_MASK_TX	\
	BIT(NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC)

#define NFP_NET_TLS_OPCODE_MASK						\
	(NFP_NET_TLS_OPCODE_MASK_RX | NFP_NET_TLS_OPCODE_MASK_TX)
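
/* Flip a single opcode's enable bit in the crypto enable region of the
 * ctrl BAR.  Opcodes form a bitmap of 32-bit words, hence round_down()
 * to find the word and BIT(opcode & 31) to find the bit within it.
 */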
static void nfp_net_crypto_set_op(struct nfp_net *nn, u8 opcode, bool on)
{
	u32 off, val;

	off = nn->tlv_caps.crypto_enable_off + round_down(opcode / 8, 4);

	val = nn_readl(nn, off);
	if (on)
		val |= BIT(opcode & 31);
	else
		val &= ~BIT(opcode & 31);
	nn_writel(nn, off, val);
}
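
/* Adjust the per-direction connection count and, on 0 -> 1 or 1 -> 0
 * transitions, toggle the corresponding opcode enable bit.  Returns true
 * if a FW reconfig is required.  Caller must hold the ctrl BAR lock.
 */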
static bool
__nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			       enum tls_offload_ctx_dir direction)
{
	u8 opcode;
	int cnt;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		nn->ktls_tx_conn_cnt += add;
		cnt = nn->ktls_tx_conn_cnt;
		nn->dp.ktls_tx = !!nn->ktls_tx_conn_cnt;
	} else {
		opcode = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		nn->ktls_rx_conn_cnt += add;
		cnt = nn->ktls_rx_conn_cnt;
	}

	/* Care only about 0 -> 1 and 1 -> 0 transitions */
	if (cnt > 1)
		return false;

	nfp_net_crypto_set_op(nn, opcode, cnt);
	return true;
}

static int
nfp_net_tls_conn_cnt_changed(struct nfp_net *nn, int add,
			     enum tls_offload_ctx_dir direction)
{
	int ret = 0;

	/* Use the BAR lock to protect the connection counts */
	nn_ctrl_bar_lock(nn);
	if (__nfp_net_tls_conn_cnt_changed(nn, add, direction)) {
		ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
		/* Undo the cnt adjustment if failed */
		if (ret)
			__nfp_net_tls_conn_cnt_changed(nn, -add, direction);
	}
	nn_ctrl_bar_unlock(nn);

	return ret;
}

static int
nfp_net_tls_conn_add(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, 1, direction);
}

static int
nfp_net_tls_conn_remove(struct nfp_net *nn, enum tls_offload_ctx_dir direction)
{
	return nfp_net_tls_conn_cnt_changed(nn, -1, direction);
}

static struct sk_buff *
nfp_net_tls_alloc_simple(struct nfp_net *nn, size_t req_sz, gfp_t flags)
{
	return nfp_ccm_mbox_msg_alloc(nn, req_sz,
				      sizeof(struct nfp_crypto_reply_simple),
				      flags);
}
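
/* Send a request which is answered by a simple, error-code-only reply.
 * Delete requests are marked critical for the mailbox layer, a dropped
 * delete would leak the FW session.
 */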
static int
nfp_net_tls_communicate_simple(struct nfp_net *nn, struct sk_buff *skb,
			       const char *name, enum nfp_ccm_type type)
{
	struct nfp_crypto_reply_simple *reply;
	int err;

	err = __nfp_ccm_mbox_communicate(nn, skb, type,
					 sizeof(*reply), sizeof(*reply),
					 type == NFP_CCM_TYPE_CRYPTO_DEL);
	if (err) {
		nn_dp_warn(&nn->dp, "failed to %s TLS: %d\n", name, err);
		return err;
	}

	reply = (void *)skb->data;
	err = -be32_to_cpu(reply->error);
	if (err)
		nn_dp_warn(&nn->dp, "failed to %s TLS, fw replied: %d\n",
			   name, err);
	dev_consume_skb_any(skb);

	return err;
}

static void nfp_net_tls_del_fw(struct nfp_net *nn, __be32 *fw_handle)
{
	struct nfp_crypto_req_del *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;

	req = (void *)skb->data;
	req->ep_id = 0;
	memcpy(req->handle, fw_handle, sizeof(req->handle));

	nfp_net_tls_communicate_simple(nn, skb, "delete",
				       NFP_CCM_TYPE_CRYPTO_DEL);
}

static void
nfp_net_tls_set_ipver_vlan(struct nfp_crypto_req_add_front *front, u8 ipver)
{
	front->ipver_vlan = cpu_to_be16(FIELD_PREP(NFP_NET_TLS_IPVER, ipver) |
					FIELD_PREP(NFP_NET_TLS_VLAN,
						   NFP_NET_TLS_VLAN_UNUSED));
}
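
/* TX sessions are not matched by address tuple, so carry a unique 64-bit
 * connection ID in the address bytes of the request instead, and zero
 * the remainder.
 */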
static void
nfp_net_tls_assign_conn_id(struct nfp_net *nn,
			   struct nfp_crypto_req_add_front *front)
{
	u32 len;
	u64 id;

	id = atomic64_inc_return(&nn->ktls_conn_id_gen);
	len = front->key_len - NFP_NET_TLS_NON_ADDR_KEY_LEN;

	memcpy(front->l3_addrs, &id, sizeof(id));
	memset(front->l3_addrs + sizeof(id), 0, len - sizeof(id));
}
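
/* Note the swap in the RX branches below: RX sessions are matched against
 * incoming packets, whose source address/port is the socket's destination
 * and vice versa.
 */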
static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv4(struct nfp_net *nn, struct nfp_crypto_req_add_v4 *req,
		     struct sock *sk, int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	req->front.key_len += sizeof(__be32) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		nfp_net_tls_assign_conn_id(nn, &req->front);
	} else {
		req->src_ip = inet->inet_daddr;
		req->dst_ip = inet->inet_saddr;
	}

	return &req->back;
}

static struct nfp_crypto_req_add_back *
nfp_net_tls_set_ipv6(struct nfp_net *nn, struct nfp_crypto_req_add_v6 *req,
		     struct sock *sk, int direction)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6_pinfo *np = inet6_sk(sk);

	req->front.key_len += sizeof(struct in6_addr) * 2;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		nfp_net_tls_assign_conn_id(nn, &req->front);
	} else {
		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
		memcpy(req->dst_ip, &np->saddr, sizeof(req->dst_ip));
	}

#endif
	return &req->back;
}

static void
nfp_net_tls_set_l4(struct nfp_crypto_req_add_front *front,
		   struct nfp_crypto_req_add_back *back, struct sock *sk,
		   int direction)
{
	struct inet_sock *inet = inet_sk(sk);

	front->l4_proto = IPPROTO_TCP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		back->src_port = 0;
		back->dst_port = 0;
	} else {
		back->src_port = inet->inet_dport;
		back->dst_port = inet->inet_sport;
	}
}

static u8 nfp_tls_1_2_dir_to_opcode(enum tls_offload_ctx_dir direction)
{
	switch (direction) {
	case TLS_OFFLOAD_CTX_DIR_TX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
	case TLS_OFFLOAD_CTX_DIR_RX:
		return NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static bool
nfp_net_cipher_supported(struct nfp_net *nn, u16 cipher_type,
			 enum tls_offload_ctx_dir direction)
{
	u8 bit;

	switch (cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		if (direction == TLS_OFFLOAD_CTX_DIR_TX)
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_ENC;
		else
			bit = NFP_NET_CRYPTO_OP_TLS_1_2_AES_GCM_128_DEC;
		break;
	default:
		return false;
	}

	return nn->tlv_caps.crypto_ops & BIT(bit);
}
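
/* tls_dev_add() callback.  Builds a CRYPTO_ADD message describing the
 * flow and key material, sends it over the CCM mailbox, wipes the key
 * material from the skb, and saves the returned FW handle in the kTLS
 * driver context.
 */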
static int
nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
		enum tls_offload_ctx_dir direction,
		struct tls_crypto_info *crypto_info,
		u32 start_offload_tcp_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *tls_ci;
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_crypto_req_add_front *front;
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_add_back *back;
	struct nfp_crypto_reply_add *reply;
	struct sk_buff *skb;
	size_t req_sz;
	void *req;
	bool ipv6;
	int err;

	BUILD_BUG_ON(sizeof(struct nfp_net_tls_offload_ctx) >
		     TLS_DRIVER_STATE_SIZE_TX);
	BUILD_BUG_ON(offsetof(struct nfp_net_tls_offload_ctx, rx_end) >
		     TLS_DRIVER_STATE_SIZE_RX);

	if (!nfp_net_cipher_supported(nn, crypto_info->cipher_type, direction))
		return -EOPNOTSUPP;

	switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (ipv6_only_sock(sk) ||
		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
			req_sz = sizeof(struct nfp_crypto_req_add_v6);
			ipv6 = true;
			break;
		}
		fallthrough;
#endif
	case AF_INET:
		req_sz = sizeof(struct nfp_crypto_req_add_v4);
		ipv6 = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = nfp_net_tls_conn_add(nn, direction);
	if (err)
		return err;

	skb = nfp_ccm_mbox_msg_alloc(nn, req_sz, sizeof(*reply), GFP_KERNEL);
	if (!skb) {
		err = -ENOMEM;
		goto err_conn_remove;
	}

	front = (void *)skb->data;
	front->ep_id = 0;
	front->key_len = NFP_NET_TLS_NON_ADDR_KEY_LEN;
	front->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(front->resv, 0, sizeof(front->resv));

	nfp_net_tls_set_ipver_vlan(front, ipv6 ? 6 : 4);

	req = (void *)skb->data;
	if (ipv6)
		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
	else
		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);

	nfp_net_tls_set_l4(front, back, sk, direction);

	back->counter = 0;
	back->tcp_seq = cpu_to_be32(start_offload_tcp_sn);

	tls_ci = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	memcpy(back->key, tls_ci->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memset(&back->key[TLS_CIPHER_AES_GCM_128_KEY_SIZE / 4], 0,
	       sizeof(back->key) - TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(back->iv, tls_ci->iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(&back->salt, tls_ci->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(back->rec_no, tls_ci->rec_seq, sizeof(tls_ci->rec_seq));

	/* Get an extra ref on the skb so we can wipe the key after */
	skb_get(skb);

	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_CRYPTO_ADD,
				       sizeof(*reply), sizeof(*reply));
	reply = (void *)skb->data;

	/* We depend on CCM MBOX code not reallocating skb we sent
	 * so we can clear the key material out of the memory.
	 */
	if (!WARN_ON_ONCE((u8 *)back < skb->head ||
			  (u8 *)back > skb_end_pointer(skb)) &&
	    !WARN_ON_ONCE((u8 *)&reply[1] > (u8 *)back))
		memzero_explicit(back, sizeof(*back));
	dev_consume_skb_any(skb); /* the extra ref from skb_get() above */

	if (err) {
		nn_dp_warn(&nn->dp, "failed to add TLS: %d (%d)\n",
			   err, direction == TLS_OFFLOAD_CTX_DIR_TX);
		/* communicate frees skb on error */
		goto err_conn_remove;
	}

	err = -be32_to_cpu(reply->error);
	if (err) {
		if (err == -ENOSPC) {
			if (!atomic_fetch_inc(&nn->ktls_no_space))
				nn_info(nn, "HW TLS table full\n");
		} else {
			nn_dp_warn(&nn->dp,
				   "failed to add TLS, FW replied: %d\n", err);
		}
		goto err_free_skb;
	}

	if (!reply->handle[0] && !reply->handle[1]) {
		nn_dp_warn(&nn->dp, "FW returned NULL handle\n");
		err = -EINVAL;
		goto err_fw_remove;
	}

	ntls = tls_driver_ctx(sk, direction);
	memcpy(ntls->fw_handle, reply->handle, sizeof(ntls->fw_handle));
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		ntls->next_seq = start_offload_tcp_sn;
	dev_consume_skb_any(skb);

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		return 0;

	if (!nn->tlv_caps.tls_resync_ss)
		tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT);

	return 0;

err_fw_remove:
	nfp_net_tls_del_fw(nn, reply->handle);
err_free_skb:
	dev_consume_skb_any(skb);
err_conn_remove:
	nfp_net_tls_conn_remove(nn, direction);
	return err;
}
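
/* tls_dev_del() callback.  Drop the connection count (potentially
 * disabling the opcode) and tell the FW to forget the session.
 */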

static void
nfp_net_tls_del(struct net_device *netdev, struct tls_context *tls_ctx,
		enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;

	nfp_net_tls_conn_remove(nn, direction);

	ntls = __tls_driver_ctx(tls_ctx, direction);
	nfp_net_tls_del_fw(nn, ntls->fw_handle);
}
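
/* tls_dev_resync() callback.  TX resync may sleep and waits for the FW
 * reply; RX resync is called from the datapath, so the message is posted
 * fire-and-forget with a GFP_ATOMIC allocation.
 */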

static int
nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
		   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct nfp_crypto_req_update *req;
	enum nfp_ccm_type type;
	struct sk_buff *skb;
	gfp_t flags;
	int err;

	flags = direction == TLS_OFFLOAD_CTX_DIR_TX ? GFP_KERNEL : GFP_ATOMIC;
	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), flags);
	if (!skb)
		return -ENOMEM;

	ntls = tls_driver_ctx(sk, direction);
	req = (void *)skb->data;
	req->ep_id = 0;
	req->opcode = nfp_tls_1_2_dir_to_opcode(direction);
	memset(req->resv, 0, sizeof(req->resv));
	memcpy(req->handle, ntls->fw_handle, sizeof(ntls->fw_handle));
	req->tcp_seq = cpu_to_be32(seq);
	memcpy(req->rec_no, rcd_sn, sizeof(req->rec_no));

	type = NFP_CCM_TYPE_CRYPTO_UPDATE;
	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		err = nfp_net_tls_communicate_simple(nn, skb, "sync", type);
		if (err)
			return err;
		ntls->next_seq = seq;
	} else {
		if (nn->tlv_caps.tls_resync_ss)
			type = NFP_CCM_TYPE_CRYPTO_RESYNC;
		nfp_ccm_mbox_post(nn, skb, type,
				  sizeof(struct nfp_crypto_reply_simple));
		atomic_inc(&nn->ktls_rx_resync_sent);
	}

	return 0;
}

static const struct tlsdev_ops nfp_net_tls_ops = {
	.tls_dev_add = nfp_net_tls_add,
	.tls_dev_del = nfp_net_tls_del,
	.tls_dev_resync = nfp_net_tls_resync,
};
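
/* Handle a FW-requested RX resync: validate the header offsets, look up
 * the established TCP socket from the packet headers, verify it is still
 * offloaded to this device, then ask the TLS core to resync at the TCP
 * sequence number reported by the FW.
 */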
int nfp_net_tls_rx_resync_req(struct net_device *netdev,
			      struct nfp_net_tls_resync_req *req,
			      void *pkt, unsigned int pkt_len)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_tls_offload_ctx *ntls;
	struct net *net = dev_net(netdev);
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	struct iphdr *iph;
	struct sock *sk;
	__be32 tcp_seq;
	int err;

	iph = pkt + req->l3_offset;
	ipv6h = pkt + req->l3_offset;
	th = pkt + req->l4_offset;

	if ((u8 *)&th[1] > (u8 *)pkt + pkt_len) {
		netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu pkt_len: %u)\n",
				 req->l3_offset, req->l4_offset, pkt_len);
		err = -EINVAL;
		goto err_cnt_ign;
	}

	switch (ipv6h->version) {
	case 4:
		sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case 6:
		sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
		break;
#endif
	default:
		netdev_warn_once(netdev, "invalid TLS RX resync request (l3_off: %hhu l4_off: %hhu ipver: %u)\n",
				 req->l3_offset, req->l4_offset, iph->version);
		err = -EINVAL;
		goto err_cnt_ign;
	}

	err = 0;
	if (!sk)
		goto err_cnt_ign;
	if (!tls_is_sk_rx_device_offloaded(sk) ||
	    sk->sk_shutdown & RCV_SHUTDOWN)
		goto err_put_sock;

	ntls = tls_driver_ctx(sk, TLS_OFFLOAD_CTX_DIR_RX);
	/* some FW versions can't report the handle and report 0s */
	if (memchr_inv(&req->fw_handle, 0, sizeof(req->fw_handle)) &&
	    memcmp(&req->fw_handle, &ntls->fw_handle, sizeof(ntls->fw_handle)))
		goto err_put_sock;

	/* copy to ensure alignment */
	memcpy(&tcp_seq, &req->tcp_seq, sizeof(tcp_seq));
	tls_offload_rx_resync_request(sk, tcp_seq);
	atomic_inc(&nn->ktls_rx_resync_req);

	sock_gen_put(sk);
	return 0;

err_put_sock:
	sock_gen_put(sk);
err_cnt_ign:
	atomic_inc(&nn->ktls_rx_resync_ign);
	return err;
}

static int nfp_net_tls_reset(struct nfp_net *nn)
{
	struct nfp_crypto_req_reset *req;
	struct sk_buff *skb;

	skb = nfp_net_tls_alloc_simple(nn, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->ep_id = 0;

	return nfp_net_tls_communicate_simple(nn, skb, "reset",
					      NFP_CCM_TYPE_CRYPTO_RESET);
}
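
/* Called at probe time.  Checks FW TLS capabilities, resets FW crypto
 * state, clears the opcode enable bits, and only then advertises the
 * NETIF_F_HW_TLS_RX/TX features and installs nfp_net_tls_ops.
 */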
int nfp_net_tls_init(struct nfp_net *nn)
{
	struct net_device *netdev = nn->dp.netdev;
	int err;

	if (!(nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK))
		return 0;

	if ((nn->tlv_caps.mbox_cmsg_types & NFP_NET_TLS_CCM_MBOX_OPS_MASK) !=
	    NFP_NET_TLS_CCM_MBOX_OPS_MASK)
		return 0;

	if (!nfp_ccm_mbox_fits(nn, sizeof(struct nfp_crypto_req_add_v6))) {
		nn_warn(nn, "disabling TLS offload - mbox too small: %d\n",
			nn->tlv_caps.mbox_len);
		return 0;
	}

	err = nfp_net_tls_reset(nn);
	if (err)
		return err;

	nn_ctrl_bar_lock(nn);
	nn_writel(nn, nn->tlv_caps.crypto_enable_off, 0);
	err = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_CRYPTO);
	nn_ctrl_bar_unlock(nn);
	if (err)
		return err;

	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_RX) {
		netdev->hw_features |= NETIF_F_HW_TLS_RX;
		netdev->features |= NETIF_F_HW_TLS_RX;
	}
	if (nn->tlv_caps.crypto_ops & NFP_NET_TLS_OPCODE_MASK_TX) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features |= NETIF_F_HW_TLS_TX;
	}

	netdev->tlsdev_ops = &nfp_net_tls_ops;

	return 0;
}