s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_sw += rq_stats->csum_sw;
+ s->rx_csum_inner += rq_stats->csum_inner;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
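The new rx_csum_inner software counter is summed from the per-RQ counters like its neighbours. A minimal sketch of the surrounding aggregation, assuming the usual mlx5e per-channel loop; everything other than the three csum additions (the loop shape, the priv/channel field names, and the sketch_update_sw_counters name) is an assumption for illustration, not part of this hunk:

	/* Sketch only: fold every RQ's counters into the device-wide sw stats. */
	static void sketch_update_sw_counters(struct mlx5e_priv *priv)
	{
		struct mlx5e_sw_stats *s = &priv->stats.sw;	/* assumed location */
		int i;

		memset(s, 0, sizeof(*s));
		for (i = 0; i < priv->params.num_channels; i++) {
			struct mlx5e_rq_stats *rq_stats = &priv->channel[i]->rq.stats;

			s->rx_csum_sw    += rq_stats->csum_sw;
			s->rx_csum_inner += rq_stats->csum_inner;	/* new counter */
			s->rx_csum_none  += rq_stats->csum_none;
			/* ... the remaining RX counters are summed the same way ... */
		}
	}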
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (likely(is_first_ethertype_ip(skb))) {
+ return;
+ }
+
+ if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
rq->stats.csum_sw++;
- } else {
- goto csum_none;
+ return;
}
- return;
-
+ if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (cqe_is_tunneled(cqe)) {
+ skb->csum_level = 1;
+ skb->encapsulation = 1;
+ rq->stats.csum_inner++;
+ }
+ return;
+ }
csum_none:
skb->ip_summed = CHECKSUM_NONE;
rq->stats.csum_none++;
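For readability, this is how the RX checksum decision reads once the hunk above is applied. The enclosing function and its signature (taken here to be mlx5e_handle_csum() with an lro flag) and the initial NETIF_F_RXCSUM guard that jumps to csum_none are assumptions, not part of the hunk; the body is reconstructed from the added and unchanged lines above.

	/* Sketch of the resulting flow; signature and the first guard are assumed. */
	static inline void mlx5e_handle_csum(struct net_device *netdev,
					     struct mlx5_cqe64 *cqe,
					     struct mlx5e_rq *rq,
					     struct sk_buff *skb, bool lro)
	{
		if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
			goto csum_none;				/* assumed guard */

		if (lro) {					/* aggregated LRO packet: report as verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

		if (is_first_ethertype_ip(skb)) {		/* plain IP: hand the HW checksum to the stack */
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
			rq->stats.csum_sw++;
			return;
		}

		if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&	/* HW validated outer L3 and L4 */
			   (cqe->hds_ip_ext & CQE_L4_OK))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (cqe_is_tunneled(cqe)) {		/* inner headers were verified too */
				skb->csum_level = 1;
				skb->encapsulation = 1;
				rq->stats.csum_inner++;
			}
			return;
		}

	csum_none:
		skb->ip_summed = CHECKSUM_NONE;			/* let the stack verify it */
		rq->stats.csum_none++;
	}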
u64 rx_csum_good;
u64 rx_csum_none;
u64 rx_csum_sw;
+ u64 rx_csum_inner;
u64 tx_csum_offload;
u64 tx_csum_inner;
u64 tx_queue_stopped;
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
- u64 csum_none;
u64 csum_sw;
+ u64 csum_inner;
+ u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) },
};
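Both descriptor tables gain a matching rx_csum_inner entry. Assuming MLX5E_DECLARE_STAT records a (name, offsetof) pair, reordering csum_sw/csum_inner/csum_none in rq_stats_desc only changes the order in which the counters are printed, not how they are read; the new counter should then appear next to rx_csum_sw and rx_csum_none wherever the driver exposes these software stats (for mlx5e that is normally the ethtool -S output).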
struct mlx5_cqe64 {
- u8 rsvd0[2];
+ u8 outer_l3_tunneled;
+ u8 rsvd0;
__be16 wqe_id;
u8 lro_tcppsh_abort_dupack;
u8 lro_min_ttl;
__be16 lro_tcp_win;
__be16 slid;
__be32 flags_rqpn;
u8 hds_ip_ext;
- u8 l4_hdr_type_etc;
+ u8 l4_l3_hdr_type;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
- return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+ return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+}
+
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+ return cqe->outer_l3_tunneled & 0x1;
}
static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
- return !!(cqe->l4_hdr_type_etc & 0x1);
+ return !!(cqe->l4_l3_hdr_type & 0x1);
}
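The renamed l4_l3_hdr_type byte and the new outer_l3_tunneled byte are plain bit fields. Below is a sketch of the layout implied by the accessors above (inferred purely from their shifts and masks, not from a hardware spec), together with an illustrative helper that combines them the way the RX path does; cqe_inner_csum_ok() is a made-up name, not part of the patch.

	/*
	 * Implied bit layout (sketch, inferred from the helpers above):
	 *   l4_l3_hdr_type:    bit 0    - VLAN present      (cqe_has_vlan)
	 *                      bits 3:2 - L3 header type    (get_cqe_l3_hdr_type)
	 *                      bits 6:4 - L4 header type    (get_cqe_l4_hdr_type)
	 *   outer_l3_tunneled: bit 0    - packet is tunneled (cqe_is_tunneled)
	 */
	static inline int cqe_inner_csum_ok(struct mlx5_cqe64 *cqe)	/* illustrative only */
	{
		return cqe_is_tunneled(cqe) &&
		       (cqe->hds_ip_ext & CQE_L3_OK) &&
		       (cqe->hds_ip_ext & CQE_L4_OK);
	}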
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)