	return skb;
}
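+
+/* RX syndrome values reported by the device in the IPsec CQE metadata. */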
+enum {
+	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
+	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
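+/* Handle the RX CQE metadata of a packet that went through the IPsec
+ * offload: look up the xfrm state by the SA handle carried in the CQE,
+ * attach it to the skb's sec_path and translate the hardware syndrome
+ * into an xfrm_offload status. Failures only bump the SW stats counters.
+ */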
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+				       struct sk_buff *skb,
+				       struct mlx5_cqe64 *cqe)
+{
+	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+	u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
+	struct mlx5e_priv *priv;
+	struct xfrm_offload *xo;
+	struct xfrm_state *xs;
+	struct sec_path *sp;
+	u32 sa_handle;
+
+	sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
+	priv = netdev_priv(netdev);
+	sp = secpath_set(skb);
+	if (unlikely(!sp)) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+		return;
+	}
+
+	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
+	if (unlikely(!xs)) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+		return;
+	}
+
+	sp = skb_sec_path(skb);
+	sp->xvec[sp->len++] = xs;
+	sp->olen++;
+
+	xo = xfrm_offload(skb);
+	xo->flags = CRYPTO_DONE;
+
+	switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
+	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
+		xo->status = CRYPTO_SUCCESS;
+		if (WARN_ON_ONCE(priv->ipsec->no_trailer))
+			xo->flags |= XFRM_ESP_NO_TRAILER;
+		break;
+	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
+		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
+		break;
+	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
+		xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
+		break;
+	default:
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+	}
+}
+
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
			       netdev_features_t features)
{
#ifndef __MLX5E_IPSEC_RXTX_H__
#define __MLX5E_IPSEC_RXTX_H__
-#ifdef CONFIG_MLX5_EN_IPSEC
-
#include <linux/skbuff.h>
#include <net/xfrm.h>
#include "en.h"
#include "en/txrx.h"
+#define MLX5_IPSEC_METADATA_MARKER_MASK (0x80)
+#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
+#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
					  struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
			       struct mlx5_wqe_eth_seg *eseg,
			       struct sk_buff *skb);
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+				       struct sk_buff *skb,
+				       struct mlx5_cqe64 *cqe);
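+
+/* True when the CQE metadata has the IPsec marker bit set, i.e. the packet
+ * was processed by the IPsec RX offload.
+ */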
+static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+	return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
+}
+#else
+static inline
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+				       struct sk_buff *skb,
+				       struct mlx5_cqe64 *cqe)
+{}
+
+static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
#include "en/hv_vhca_stats.h"
#include "en/devlink.h"
#include "lib/mlx5.h"
+#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
#ifdef CONFIG_MLX5_EN_IPSEC
-		if (c->priv->ipsec)
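+		/* Only the FPGA IPsec device uses the dedicated IPsec RX CQE
+		 * handler; the offload path added in this patch is driven from
+		 * the regular RX handlers via the CQE metadata.
+		 */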
+		if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
+		    c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
		goto csum_unnecessary;
	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
+		u8 ipproto = get_ip_proto(skb, network_depth, proto);
+
+		if (unlikely(ipproto == IPPROTO_SCTP))
			goto csum_unnecessary;
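+
+		/* Don't report CHECKSUM_COMPLETE for packets that went through
+		 * the IPsec RX offload; fall back to the csum_none path below.
+		 */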
+		if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
+			goto csum_none;
+
		stats->csum_complete++;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
	mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
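+	/* For IPsec RX offloaded packets, restore the xfrm state and report
+	 * the decryption result to the stack.
+	 */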
+	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
+		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
+
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);