which were part of a TLS stream.
* ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets
which were successfully decrypted.
+ * ``rx_tls_ctx`` - number of TLS RX HW offload contexts added to device for
+ decryption.
+ * ``rx_tls_del`` - number of TLS RX HW offload contexts deleted from device
+ (connection has finished).
+ * ``rx_tls_resync_req_pkt`` - number of received TLS packets with a resync
+ request.
+ * ``rx_tls_resync_req_start`` - number of times the TLS async resync request
+ was started.
+ * ``rx_tls_resync_req_end`` - number of times the TLS async resync request
+ ended properly, providing the HW tracked tcp-seq.
+ * ``rx_tls_resync_req_skip`` - number of times the TLS async resync request
+ procedure was started but not properly ended.
+ * ``rx_tls_resync_res_ok`` - number of times the TLS resync response call to
+ the driver was successfully handled.
+ * ``rx_tls_resync_res_skip`` - number of times the TLS resync response call to
+ the driver was terminated unsuccessfully.
+ * ``rx_tls_err`` - number of RX packets which were part of a TLS stream
+ but were not decrypted due to an unexpected error in the state machine.
* ``tx_tls_encrypted_packets`` - number of TX packets passed to the device
for encryption of their TLS payload.
* ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
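
All of the counters above are exposed through the standard ethtool stats
interface, so ``ethtool -S <ifname> | grep tls`` is the quickest way to watch
them. For completeness, the sketch below (not part of this patch; the default
interface name "eth0" and the minimal error handling are illustrative only)
fetches the stat names and values through the ETHTOOL_GSTRINGS/ETHTOOL_GSTATS
ioctls and prints only the RX TLS counters:

/* Minimal user-space sketch, not part of this patch: dump the rx_tls_*
 * software counters of a netdev via the ethtool ioctls.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static void *ethtool_ioctl(int fd, const char *dev, void *cmd)
{
	struct ifreq ifr = {0};

	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = cmd;
	return ioctl(fd, SIOCETHTOOL, &ifr) ? NULL : cmd;
}

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "eth0"; /* example name only */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	unsigned int i, n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || !ethtool_ioctl(fd, dev, &drvinfo))
		return 1;
	n = drvinfo.n_stats;

	strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	if (!strings || !stats)
		return 1;
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	if (!ethtool_ioctl(fd, dev, strings) || !ethtool_ioctl(fd, dev, stats))
		return 1;

	/* Print only the TLS RX software counters documented above. */
	for (i = 0; i < n; i++) {
		const char *name = (const char *)&strings->data[i * ETH_GSTRING_LEN];

		if (strstr(name, "rx_tls"))
			printf("%-32s %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}
	return 0;
}
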
struct tls12_crypto_info_aes_gcm_128 crypto_info;
struct accel_rule rule;
struct sock *sk;
+ struct mlx5e_rq_stats *stats;
struct completion add_ctx;
u32 tirn;
u32 key_id;
return err;
err_out:
+ priv_rx->stats->tls_resync_req_skip++;
err = PTR_ERR(cseg);
complete(&priv_rx->add_ctx);
goto unlock;
return cseg;
err_out:
+ priv_rx->stats->tls_resync_req_skip++;
return ERR_PTR(err);
}
cseg = post_static_params(sq, priv_rx);
if (IS_ERR(cseg)) {
+ priv_rx->stats->tls_resync_res_skip++;
err = PTR_ERR(cseg);
goto unlock;
}
/* Do not increment priv_rx refcnt, CQE handling is empty */
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ priv_rx->stats->tls_resync_res_ok++;
unlock:
spin_unlock(&c->async_icosq_lock);
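/* GET_PSV completion: the buffer now holds the TLS progress params read back
 * from the device. The HW tracked tcp-seq (hw_resync_tcp_sn) is meaningful
 * only while the record tracker is still tracking and auth_state reports
 * no-offload, i.e. the device is waiting for a SW-driven resync.
 */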
tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
- auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD)
+ auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
+ priv_rx->stats->tls_resync_req_skip++;
goto out;
+ }
hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ priv_rx->stats->tls_resync_req_end++;
out:
refcount_dec(&resync->refcnt);
kfree(buf);
seq = th->seq;
datalen = skb->len - depth;
tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+ rq->stats->tls_resync_req_start++;
}
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
u8 tls_offload = get_cqe_tls_offload(cqe);
+ struct mlx5e_rq_stats *stats;
if (likely(tls_offload == CQE_TLS_OFFLOAD_NOT_DECRYPTED))
return;
+ stats = rq->stats;
+
switch (tls_offload) {
case CQE_TLS_OFFLOAD_DECRYPTED:
skb->decrypted = 1;
+ stats->tls_decrypted_packets++;
+ stats->tls_decrypted_bytes += *cqe_bcnt;
break;
case CQE_TLS_OFFLOAD_RESYNC:
+ stats->tls_resync_req_pkt++;
resync_update_sn(rq, skb);
break;
default: /* CQE_TLS_OFFLOAD_ERROR: */
+ stats->tls_err++;
break;
}
}
priv_rx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+ rxq = mlx5e_accel_sk_get_rxq(sk);
+ priv_rx->rxq = rxq;
priv_rx->sk = sk;
- priv_rx->rxq = mlx5e_accel_sk_get_rxq(sk);
+ priv_rx->stats = &priv->channel_stats[rxq].rq;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
- rxq = priv_rx->rxq;
rqtn = priv->direct_tir[rxq].rqt.rqtn;
err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
if (err)
goto err_post_wqes;
+ priv_rx->stats->tls_ctx++;
+
return 0;
err_post_wqes:
refcount_dec(&resync->refcnt);
wait_for_resync(netdev, resync);
+ priv_rx->stats->tls_del++;
if (priv_rx->rule.rule)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
+#endif
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
s->rx_congst_umr += rq_stats->congst_umr;
s->rx_arfs_err += rq_stats->arfs_err;
s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_MLX5_EN_TLS
+ s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
+ s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
+ s->rx_tls_ctx += rq_stats->tls_ctx;
+ s->rx_tls_del += rq_stats->tls_del;
+ s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
+ s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
+ s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
+ s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
+ s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
+ s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
+ s->rx_tls_err += rq_stats->tls_err;
+#endif
s->ch_events += ch_stats->events;
s->ch_poll += ch_stats->poll;
s->ch_arm += ch_stats->arm;
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
+#endif
};
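
The declaration macros used in these descriptor arrays pair an ethtool string
with the offsetof() of the matching field, so one generic loop can walk any
stats struct by offset. Below is a small self-contained sketch of that
pattern; the demo_* names are hypothetical stand-ins, not the driver's actual
definitions from en_stats.h:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's per-RQ stats and counter_desc. */
struct demo_rq_stats {
	uint64_t tls_decrypted_packets;
	uint64_t tls_decrypted_bytes;
	uint64_t tls_err;
};

struct demo_counter_desc {
	const char *format;	/* ethtool name, "%d" filled with the channel index */
	size_t offset;		/* byte offset of the field inside the stats struct */
};

#define DEMO_DECLARE_RX_STAT(type, fld) { "rx%d_" #fld, offsetof(type, fld) }

static const struct demo_counter_desc demo_rq_stats_desc[] = {
	DEMO_DECLARE_RX_STAT(struct demo_rq_stats, tls_decrypted_packets),
	DEMO_DECLARE_RX_STAT(struct demo_rq_stats, tls_decrypted_bytes),
	DEMO_DECLARE_RX_STAT(struct demo_rq_stats, tls_err),
};

int main(void)
{
	struct demo_rq_stats rq = { .tls_decrypted_packets = 3,
				    .tls_decrypted_bytes = 4096 };
	int ch = 0;	/* channel index substituted into the "rx%d_" prefix */
	size_t i;

	for (i = 0; i < sizeof(demo_rq_stats_desc) / sizeof(demo_rq_stats_desc[0]); i++) {
		uint64_t val = *(const uint64_t *)((const char *)&rq +
						   demo_rq_stats_desc[i].offset);
		char name[64];

		snprintf(name, sizeof(name), demo_rq_stats_desc[i].format, ch);
		printf("%-32s %llu\n", name, (unsigned long long)val);
	}
	return 0;
}
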
static const struct counter_desc sq_stats_desc[] = {
u64 tx_tls_skip_no_sync_data;
u64 tx_tls_drop_no_sync_data;
u64 tx_tls_drop_bypass_req;
+
+ u64 rx_tls_decrypted_packets;
+ u64 rx_tls_decrypted_bytes;
+ u64 rx_tls_ctx;
+ u64 rx_tls_del;
+ u64 rx_tls_resync_req_pkt;
+ u64 rx_tls_resync_req_start;
+ u64 rx_tls_resync_req_end;
+ u64 rx_tls_resync_req_skip;
+ u64 rx_tls_resync_res_ok;
+ u64 rx_tls_resync_res_skip;
+ u64 rx_tls_err;
#endif
u64 rx_xsk_packets;
u64 congst_umr;
u64 arfs_err;
u64 recover;
+#ifdef CONFIG_MLX5_EN_TLS
+ u64 tls_decrypted_packets;
+ u64 tls_decrypted_bytes;
+ u64 tls_ctx;
+ u64 tls_del;
+ u64 tls_resync_req_pkt;
+ u64 tls_resync_req_start;
+ u64 tls_resync_req_end;
+ u64 tls_resync_req_skip;
+ u64 tls_resync_res_ok;
+ u64 tls_resync_res_skip;
+ u64 tls_err;
+#endif
};
struct mlx5e_sq_stats {