/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #ifndef __MLX5E_EN_ACCEL_H__
35 #define __MLX5E_EN_ACCEL_H__
37 #include <linux/skbuff.h>
38 #include <linux/netdevice.h>
39 #include "en_accel/ipsec_rxtx.h"
40 #include "en_accel/tls_rxtx.h"
44 #if IS_ENABLED(CONFIG_GENEVE)
45 #include <net/geneve.h>
47 static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
49 return mlx5_tx_swp_supported(mdev);
53 mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
55 struct mlx5e_swp_spec swp_spec = {};
56 unsigned int offset = 0;
60 l3_proto = vlan_get_protocol(skb);
63 l4_proto = ip_hdr(skb)->protocol;
65 case htons(ETH_P_IPV6):
66 l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
72 if (l4_proto != IPPROTO_UDP ||
73 udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
75 swp_spec.l3_proto = l3_proto;
76 swp_spec.l4_proto = l4_proto;
77 swp_spec.is_tun = true;
78 if (inner_ip_hdr(skb)->version == 6) {
79 swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
80 swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
82 swp_spec.tun_l3_proto = htons(ETH_P_IP);
83 swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
86 mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
90 static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
95 #endif /* CONFIG_GENEVE */
98 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
100 int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
102 udp_hdr(skb)->len = htons(payload_len);
/* Per-packet acceleration state carried from mlx5e_accel_tx_begin() to
 * mlx5e_accel_tx_finish() across WQE construction.
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;
#endif
};
111 static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
112 struct mlx5e_txqsq *sq,
114 struct mlx5e_accel_tx_state *state)
116 if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
117 mlx5e_udp_gso_handle_tx_skb(skb);
119 #ifdef CONFIG_MLX5_EN_TLS
120 if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
121 /* May send SKBs and WQEs. */
122 if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
130 static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
131 struct mlx5e_txqsq *sq,
133 struct mlx5e_tx_wqe *wqe,
134 struct mlx5e_accel_tx_state *state)
136 #ifdef CONFIG_MLX5_EN_TLS
137 mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
140 #ifdef CONFIG_MLX5_EN_IPSEC
141 if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
142 if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
150 #endif /* __MLX5E_EN_ACCEL_H__ */