net/mlx5e: Add resiliency for PTP TX port timestamp
author Aya Levin <ayal@nvidia.com>
Mon, 4 Jul 2022 16:34:26 +0000 (19:34 +0300)
committer Saeed Mahameed <saeedm@nvidia.com>
Tue, 19 Jul 2022 20:32:54 +0000 (13:32 -0700)
PTP TX port timestamping relies on receiving 2 CQEs for each outgoing
packet (WQE): a regular CQE and a wire CQE, where the regular CQE
carries a less accurate timestamp than the wire CQE. On a link change,
the wire CQE may get lost. Let the driver detect this, restore the
relation between the CQEs, and re-sync after a timeout.

Add resiliency for this as follows: add an id (the producer counter)
into the WQE's metadata. This id is echoed back in the wire CQE (in
its wqe_counter field). When handling a wire CQE whose id does not
match the expected one, replay the timestamps from the regular CQEs
to the PTP application for the skipped SKBs, restoring the sync
between the CQEs and their SKBs. This patch adds 2 ptp counters:
1) ptp_cq0_resync_event: number of times a mismatch was detected
   between the regular CQE and the wire CQE.
2) ptp_cq0_resync_cqe: total number of missing wire CQEs.
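
In isolation, the resync logic can be sketched as a small compilable
toy model (illustrative only: FIFO_SZ, struct entry and replay_to_app()
are invented stand-ins for the driver's skb FIFO and skb_tstamp_tx();
this is not the driver code):

#include <stdint.h>
#include <stdio.h>

#define FIFO_SZ   8              /* toy FIFO depth (power of two) */
#define CTR_MASK  (FIFO_SZ - 1)  /* stands in for ptpsq->ts_cqe_ctr_mask */

/* One in-flight packet: the (less accurate) timestamp already taken
 * from its regular CQE. */
struct entry { uint64_t cqe_hwtstamp; };

static struct entry fifo[FIFO_SZ];
static uint16_t fifo_cc; /* free-running consumer counter */

static void replay_to_app(uint64_t ts)
{
	/* stand-in for skb_tstamp_tx(): deliver a timestamp upward */
	printf("timestamp delivered: %llu\n", (unsigned long long)ts);
}

/* Called per wire CQE; wqe_counter echoes the masked producer counter
 * that was written into the WQE's metadata at transmit time. */
static void handle_wire_cqe(uint16_t wqe_counter, uint64_t wire_ts)
{
	uint16_t skb_id = wqe_counter & CTR_MASK;

	/* A mismatch means wire CQEs for older entries were lost: flush
	 * those entries with their regular-CQE timestamps (resync). */
	while ((fifo_cc & CTR_MASK) != skb_id) {
		replay_to_app(fifo[fifo_cc & CTR_MASK].cqe_hwtstamp);
		fifo_cc++;
	}

	/* Back in sync: this entry gets the accurate wire timestamp. */
	replay_to_app(wire_ts);
	fifo_cc++;
}

int main(void)
{
	for (int i = 0; i < 3; i++) /* packets 0..2 in flight */
		fifo[i].cqe_hwtstamp = 1000 + i;

	handle_wire_cqe(0, 5000); /* match: wire timestamp used */
	handle_wire_cqe(2, 5002); /* id 1 lost: entry 1 replayed first */
	return 0;
}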

Signed-off-by: Aya Levin <ayal@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

index 047f88f0920387e2b65cd1aca5ffaeb0c4540199..78ad96cf422239df53d5205457da6869d9706138 100644
@@ -79,19 +79,49 @@ void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
        memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
 }
 
+#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
+
+static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+       return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
+}
+
+static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id)
+{
+       struct skb_shared_hwtstamps hwts = {};
+       struct sk_buff *skb;
+
+       ptpsq->cq_stats->resync_event++;
+
+       while (skb_cc != skb_id) {
+               skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+               hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
+               skb_tstamp_tx(skb, &hwts);
+               ptpsq->cq_stats->resync_cqe++;
+               skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
+       }
+}
+
 static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
                                    struct mlx5_cqe64 *cqe,
                                    int budget)
 {
-       struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+       u16 skb_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
+       u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
        struct mlx5e_txqsq *sq = &ptpsq->txqsq;
+       struct sk_buff *skb;
        ktime_t hwtstamp;
 
        if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+               skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
                ptpsq->cq_stats->err_cqe++;
                goto out;
        }
 
+       if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id))
+               mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id);
+
+       skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
        hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
        mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
                                      hwtstamp, ptpsq->cq_stats);
@@ -241,6 +271,7 @@ static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
 static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 {
        int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
+       struct mlx5_core_dev *mdev = ptpsq->txqsq.mdev;
 
        ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
                                             GFP_KERNEL, numa);
@@ -250,7 +281,9 @@ static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
        ptpsq->skb_fifo.pc   = &ptpsq->skb_fifo_pc;
        ptpsq->skb_fifo.cc   = &ptpsq->skb_fifo_cc;
        ptpsq->skb_fifo.mask = wq_sz - 1;
-
+       if (MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter))
+               ptpsq->ts_cqe_ctr_mask =
+                       (1 << MLX5_CAP_GEN_2(mdev, ts_cqe_metadata_size2wqe_counter)) - 1;
        return 0;
 }
 
index a71a32e00ebb9f4ffd9c3704d0dc2e92e97c7f66..92dbbec472ec639eedaa2cfeead95338a2136b5b 100644
@@ -17,6 +17,7 @@ struct mlx5e_ptpsq {
        u16                      skb_fifo_pc;
        struct mlx5e_skb_fifo    skb_fifo;
        struct mlx5e_ptp_cq_stats *cq_stats;
+       u16                      ts_cqe_ctr_mask;
 };
 
 enum {
index 1a88406ee6d21e265d61f6cfeb91c73e4dcb4d0a..7409829d12012e58abde0929ae073a139e0ed39e 100644
@@ -2100,6 +2100,8 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
        { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
        { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
        { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
+       { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
+       { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
 };
 
 static const struct counter_desc ptp_rq_stats_desc[] = {
index e48b15b55b6f630047113587dbd4198fe53b4c2e..ed4fc940e4efbaa71aa9b38022514ff493ce4b57 100644
@@ -453,6 +453,8 @@ struct mlx5e_ptp_cq_stats {
        u64 err_cqe;
        u64 abort;
        u64 abort_abs_diff_ns;
+       u64 resync_cqe;
+       u64 resync_event;
 };
 
 struct mlx5e_stats {
index 699d3a9886bd615c1ece11aa522c969d3fa9519e..dc1e01e93d5aa861eb0241a8529d34a580e15b40 100644
@@ -631,12 +631,22 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
                mlx5e_tx_mpwqe_session_complete(sq);
 }
 
+static void mlx5e_cqe_ts_id_eseg(struct mlx5e_ptpsq *ptpsq, struct sk_buff *skb,
+                                struct mlx5_wqe_eth_seg *eseg)
+{
+       if (ptpsq->ts_cqe_ctr_mask && unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               eseg->flow_table_metadata = cpu_to_be32(ptpsq->skb_fifo_pc &
+                                                       ptpsq->ts_cqe_ctr_mask);
+}
+
 static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
                                   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
                                   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
        mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
        mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
+       if (unlikely(sq->ptpsq))
+               mlx5e_cqe_ts_id_eseg(sq->ptpsq, skb, eseg);
 }
 
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
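
On the setup side, mlx5e_ptp_alloc_traffic_db() above derives
ts_cqe_ctr_mask from the ts_cqe_metadata_size2wqe_counter capability,
which reports how many metadata bits the HW echoes back in the CQE's
wqe_counter. The width-to-mask conversion can be checked standalone
(the 8-bit width below is an assumed example value, not something the
hardware necessarily reports):

#include <assert.h>
#include <stdint.h>

/* An N-bit echoed field can distinguish 2^N packet ids;
 * width 0 yields mask 0, i.e. the feature stays disabled. */
static uint16_t ctr_width_to_mask(unsigned int width_bits)
{
	return width_bits ? (uint16_t)((1u << width_bits) - 1) : 0;
}

int main(void)
{
	uint16_t mask = ctr_width_to_mask(8); /* assumed example width */

	assert(mask == 0xff);
	/* A free-running producer counter of 0x1ff and a CQE
	 * wqe_counter of 0xff then name the same packet id. */
	assert((0x1ff & mask) == (0xff & mask));
	return 0;
}

A zero mask (capability absent) keeps the feature off, matching the
ts_cqe_ctr_mask checks in mlx5e_ptp_ts_cqe_drop() and
mlx5e_cqe_ts_id_eseg().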