From 76c1e1ac2aaeddd5505e4ecfafa963885b5551ab Mon Sep 17 00:00:00 2001
From: Tariq Toukan
Date: Mon, 15 Jun 2020 15:25:23 +0300
Subject: [PATCH] net/mlx5e: kTLS, Add kTLS RX stats

Add global and per-channel ethtool SW stats for the device offload.
Document the new counters in tls-offload.rst.

Signed-off-by: Tariq Toukan
Signed-off-by: Saeed Mahameed
---
 Documentation/networking/tls-offload.rst           | 18 ++++++++++
 .../ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 27 +++++++++++++--
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 39 ++++++++++++++++++++++
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 25 ++++++++++++++
 4 files changed, 106 insertions(+), 3 deletions(-)

diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index f914e81..37773da 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -428,6 +428,24 @@ by the driver:
    which were part of a TLS stream.
  * ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets
    which were successfully decrypted.
+ * ``rx_tls_ctx`` - number of TLS RX HW offload contexts added to device for
+   decryption.
+ * ``rx_tls_del`` - number of TLS RX HW offload contexts deleted from device
+   (connection has finished).
+ * ``rx_tls_resync_req_pkt`` - number of received TLS packets with a resync
+   request.
+ * ``rx_tls_resync_req_start`` - number of times the TLS async resync request
+   was started.
+ * ``rx_tls_resync_req_end`` - number of times the TLS async resync request
+   properly ended with providing the HW tracked tcp-seq.
+ * ``rx_tls_resync_req_skip`` - number of times the TLS async resync request
+   procedure was started but not properly ended.
+ * ``rx_tls_resync_res_ok`` - number of times the TLS resync response call to
+   the driver was successfully handled.
+ * ``rx_tls_resync_res_skip`` - number of times the TLS resync response call to
+   the driver was terminated unsuccessfully.
+ * ``rx_tls_err`` - number of RX packets which were part of a TLS stream
+   but were not decrypted due to unexpected error in the state machine.
  * ``tx_tls_encrypted_packets`` - number of TX packets passed to the device
    for encryption of their TLS payload.
  * ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index b26dad9..e0a8f9d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -46,6 +46,7 @@ struct mlx5e_ktls_offload_context_rx {
 	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 	struct accel_rule rule;
 	struct sock *sk;
+	struct mlx5e_rq_stats *stats;
 	struct completion add_ctx;
 	u32 tirn;
 	u32 key_id;
@@ -203,6 +204,7 @@ unlock:
 	return err;
 
 err_out:
+	priv_rx->stats->tls_resync_req_skip++;
 	err = PTR_ERR(cseg);
 	complete(&priv_rx->add_ctx);
 	goto unlock;
@@ -296,6 +298,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 	return cseg;
 
 err_out:
+	priv_rx->stats->tls_resync_req_skip++;
 	return ERR_PTR(err);
 }
 
@@ -362,11 +365,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 
 	cseg = post_static_params(sq, priv_rx);
 	if (IS_ERR(cseg)) {
+		priv_rx->stats->tls_resync_res_skip++;
 		err = PTR_ERR(cseg);
 		goto unlock;
 	}
 	/* Do not increment priv_rx refcnt, CQE handling is empty */
 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+	priv_rx->stats->tls_resync_res_ok++;
 
 unlock:
 	spin_unlock(&c->async_icosq_lock);
@@ -396,11 +401,14 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
 	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
 	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
 	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
-	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD)
+	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
+		priv_rx->stats->tls_resync_req_skip++;
 		goto out;
+	}
 
 	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
 	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+	priv_rx->stats->tls_resync_req_end++;
 out:
 	refcount_dec(&resync->refcnt);
 	kfree(buf);
@@ -479,6 +487,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
 	seq = th->seq;
 	datalen = skb->len - depth;
 	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+	rq->stats->tls_resync_req_start++;
 }
 
 void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
@@ -509,18 +518,25 @@ void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
 			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
 {
 	u8 tls_offload = get_cqe_tls_offload(cqe);
+	struct mlx5e_rq_stats *stats;
 
 	if (likely(tls_offload == CQE_TLS_OFFLOAD_NOT_DECRYPTED))
 		return;
 
+	stats = rq->stats;
+
 	switch (tls_offload) {
 	case CQE_TLS_OFFLOAD_DECRYPTED:
 		skb->decrypted = 1;
+		stats->tls_decrypted_packets++;
+		stats->tls_decrypted_bytes += *cqe_bcnt;
 		break;
 	case CQE_TLS_OFFLOAD_RESYNC:
+		stats->tls_resync_req_pkt++;
 		resync_update_sn(rq, skb);
 		break;
 	default: /* CQE_TLS_OFFLOAD_ERROR: */
+		stats->tls_err++;
 		break;
 	}
 }
@@ -562,12 +578,14 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 
 	priv_rx->crypto_info =
 		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+	rxq = mlx5e_accel_sk_get_rxq(sk);
+	priv_rx->rxq = rxq;
 	priv_rx->sk = sk;
-	priv_rx->rxq = mlx5e_accel_sk_get_rxq(sk);
+	priv_rx->stats = &priv->channel_stats[rxq].rq;
 
 	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
-	rxq = priv_rx->rxq;
 	rqtn = priv->direct_tir[rxq].rqt.rqtn;
 
 	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
@@ -586,6 +604,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	if (err)
 		goto err_post_wqes;
 
+	priv_rx->stats->tls_ctx++;
+
 	return 0;
 
 err_post_wqes:
@@ -646,6 +666,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
 		refcount_dec(&resync->refcnt);
 	wait_for_resync(netdev, resync);
 
+	priv_rx->stats->tls_del++;
 	if (priv_rx->rule.rule)
 		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f009fe0..e3b2f59 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -163,6 +163,19 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
+#endif
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -275,6 +288,19 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 		s->rx_congst_umr += rq_stats->congst_umr;
 		s->rx_arfs_err += rq_stats->arfs_err;
 		s->rx_recover += rq_stats->recover;
+#ifdef CONFIG_MLX5_EN_TLS
+		s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
+		s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
+		s->rx_tls_ctx += rq_stats->tls_ctx;
+		s->rx_tls_del += rq_stats->tls_del;
+		s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
+		s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
+		s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
+		s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
+		s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
+		s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
+		s->rx_tls_err += rq_stats->tls_err;
+#endif
 		s->ch_events += ch_stats->events;
 		s->ch_poll += ch_stats->poll;
 		s->ch_arm += ch_stats->arm;
@@ -1475,6 +1501,19 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
+#endif
 };
 
 static const struct counter_desc sq_stats_desc[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 2b83ba99..2e1cca1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -186,6 +186,18 @@ struct mlx5e_sw_stats {
 	u64 tx_tls_skip_no_sync_data;
 	u64 tx_tls_drop_no_sync_data;
 	u64 tx_tls_drop_bypass_req;
+
+	u64 rx_tls_decrypted_packets;
+	u64 rx_tls_decrypted_bytes;
+	u64 rx_tls_ctx;
+	u64 rx_tls_del;
+	u64 rx_tls_resync_req_pkt;
+	u64 rx_tls_resync_req_start;
+	u64 rx_tls_resync_req_end;
+	u64 rx_tls_resync_req_skip;
+	u64 rx_tls_resync_res_ok;
+	u64 rx_tls_resync_res_skip;
+	u64 rx_tls_err;
 #endif
 
 	u64 rx_xsk_packets;
@@ -305,6 +317,19 @@ struct mlx5e_rq_stats {
 	u64 congst_umr;
 	u64 arfs_err;
 	u64 recover;
+#ifdef CONFIG_MLX5_EN_TLS
+	u64 tls_decrypted_packets;
+	u64 tls_decrypted_bytes;
+	u64 tls_ctx;
+	u64 tls_del;
+	u64 tls_resync_req_pkt;
+	u64 tls_resync_req_start;
+	u64 tls_resync_req_end;
+	u64 tls_resync_req_skip;
+	u64 tls_resync_res_ok;
+	u64 tls_resync_res_skip;
+	u64 tls_err;
+#endif
 };
 
 struct mlx5e_sq_stats {
-- 
2.7.4
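Note (not part of the patch): the counters added here are exposed through the standard ethtool statistics interface, so they can be read with "ethtool -S <ifname>" or programmatically. Below is a minimal userspace sketch, assuming the generic ETHTOOL_GDRVINFO / ETHTOOL_GSTRINGS / ETHTOOL_GSTATS ioctls and the mlx5e naming convention shown above (global "rx_tls_*" plus per-channel "rx<N>_tls_*"); the default interface name, the file name, and the substring filter are illustrative only.

/*
 * tls_rx_stats.c - illustrative reader for the TLS RX counters added above.
 * Assumptions (not from the patch): interface name given as argv[1]
 * ("eth0" as a hypothetical default), standard ethtool ioctl interface.
 *
 * Build: gcc -o tls_rx_stats tls_rx_stats.c
 * Run:   ./tls_rx_stats <ifname>
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0";
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr = {0};
	unsigned int i, n;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* How many ethtool stats does the device expose? */
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr)) {
		perror("ETHTOOL_GDRVINFO");
		return 1;
	}
	n = drvinfo.n_stats;

	strings = calloc(1, sizeof(*strings) + (size_t)n * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + (size_t)n * sizeof(__u64));
	if (!strings || !stats)
		return 1;

	/* Fetch the stat names ... */
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ifr.ifr_data = (char *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr)) {
		perror("ETHTOOL_GSTRINGS");
		return 1;
	}

	/* ... and the stat values, reported in the same order. */
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (char *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr)) {
		perror("ETHTOOL_GSTATS");
		return 1;
	}

	/* Print the global and per-channel TLS RX counters. The names used by
	 * this patch are well under ETH_GSTRING_LEN, so they are NUL-terminated. */
	for (i = 0; i < n; i++) {
		const char *name = (const char *)&strings->data[i * ETH_GSTRING_LEN];

		if (!strncmp(name, "rx", 2) && strstr(name, "tls"))
			printf("%-32s %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}

	free(strings);
	free(stats);
	close(fd);
	return 0;
}

Per-channel values are expected to sum to the corresponding global "rx_tls_*" counter, mirroring how MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) aggregates rq_stats into mlx5e_sw_stats in the hunk above.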