// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include "en/ptp.h"
#include "en/txrx.h"
#include "en/params.h"
#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>

struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;
	struct mlx5_flow_handle *udp_v4_rule;
	struct mlx5_flow_handle *udp_v6_rule;
	bool valid;
};

struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
	struct mlx5e_rq_param rq_param;
};

struct mlx5e_ptp_port_ts_cqe_tracker {
	u8 metadata_id;
	bool inuse : 1;
	struct list_head entry;
};

struct mlx5e_ptp_port_ts_cqe_list {
	struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
	struct list_head tracker_list_head;
	/* Sync list operations in xmit and napi_poll contexts */
	spinlock_t tracker_list_lock;
};

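/* Mark a metadata id as having a port-timestamp CQE outstanding and queue it
 * on the tracker list. The add runs in the xmit path, the remove in
 * napi_poll context (see tracker_list_lock above).
 */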
static void
mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(tracker->inuse);
	tracker->inuse = true;
	spin_lock(&list->tracker_list_lock);
	list_add_tail(&tracker->entry, &list->tracker_list_head);
	spin_unlock(&list->tracker_list_lock);
}

static void
mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(!tracker->inuse);
	tracker->inuse = false;
	spin_lock(&list->tracker_list_lock);
	list_del(&tracker->entry);
	spin_unlock(&list->tracker_list_lock);
}

void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
{
	mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
}

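/* Each PTP skb gets two completions: the ordinary CQE carrying the DMA
 * timestamp and a CQE on the dedicated timestamping CQ carrying the port
 * (wire) timestamp. Both are stashed in skb->cb until the pair is complete.
 */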
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};

void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}

static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
				     struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct skb_shared_hwtstamps hwts = {};
	ktime_t diff;

	diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
		   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);

	/* Maximal allowed diff is 1 / 128 second */
	if (diff > (NSEC_PER_SEC >> 7)) {
		cq_stats->abort++;
		cq_stats->abort_abs_diff_ns += diff;
		return;
	}

	hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
}

void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
		break;
	}

	/* If both CQEs have arrived, check and report the port tstamp, and
	 * clear the skb cb, as the skb is soon to be released.
	 */
	if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
	    !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}

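/* The metadata map associates each in-flight metadata id with its skb until
 * the port-timestamp CQE (or an error CQE) consumes it.
 */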
static struct sk_buff *
mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	return map->data[metadata];
}

static struct sk_buff *
mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	struct sk_buff *skb;

	skb = map->data[metadata];
	map->data[metadata] = NULL;

	return skb;
}

static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
						 ktime_t port_tstamp)
{
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
	ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;

	spin_lock(&cqe_list->tracker_list_lock);
	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
		struct sk_buff *skb =
			mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
		ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;

		if (!dma_tstamp ||
		    ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
			break;

		metadata_map->undelivered_counter++;
		WARN_ON_ONCE(!pos->inuse);
		pos->inuse = false;
		list_del(&pos->entry);
	}
	spin_unlock(&cqe_list->tracker_list_lock);
}

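/* The WQE counter in the port-timestamp CQE carries the metadata id; mask it
 * down to the size of the tracking databases.
 */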
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)

static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    int budget)
{
	struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
	u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
		mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
	} else {
		/* Reclaim space in the unlikely event CQE was delivered after
		 * marking it late.
		 */
		ptpsq->metadata_map.undelivered_counter--;
		ptpsq->cq_stats->late_cqe++;
	}

	skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);

	if (unlikely(is_err_cqe)) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

	mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
	napi_consume_skb(skb, budget);
	mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
}

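/* Poll completions from the dedicated port-timestamp CQ, up to the NAPI
 * budget, then wake the paired txqsq in case it was stopped for lack of
 * metadata entries.
 */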
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	struct mlx5_cqwq *cqwq = &cq->wq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	mlx5e_txqsq_wake(&ptpsq->txqsq);

	return work_done == budget;
}

static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();

	return work_done;
}

static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev = c->pdev;
	sq->clock = &mdev->clock;
	sq->mkey_be = c->mkey_be;
	sq->netdev = c->netdev;
	sq->priv = c->priv;
	sq->mdev = mdev;
	sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix = txq_ix;
	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}

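/* Allocate the per-ptpsq tracking databases: the pending-CQE tracker list,
 * the metadata id freelist and the metadata -> skb map. All are sized to the
 * smaller of the SQ size and the HW metadata range reported by
 * ts_cqe_metadata_size2wqe_counter.
 */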
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
	int db_sz;
	int md;

	cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
	if (!cqe_list)
		return -ENOMEM;
	ptpsq->ts_cqe_pending_list = cqe_list;

	db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
		      1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
					  ts_cqe_metadata_size2wqe_counter));
	ptpsq->ts_cqe_ctr_mask = db_sz - 1;

	cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
					GFP_KERNEL, numa);
	if (!cqe_list->nodes)
		goto free_cqe_list;
	INIT_LIST_HEAD(&cqe_list->tracker_list_head);
	spin_lock_init(&cqe_list->tracker_list_lock);

	metadata_freelist->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
			      GFP_KERNEL, numa);
	if (!metadata_freelist->data)
		goto free_cqe_list_nodes;
	metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;

	for (md = 0; md < db_sz; ++md) {
		cqe_list->nodes[md].metadata_id = md;
		metadata_freelist->data[md] = md;
	}
	metadata_freelist->pc = db_sz;

	metadata_map->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
			      GFP_KERNEL, numa);
	if (!metadata_map->data)
		goto free_metadata_freelist;
	metadata_map->capacity = db_sz;

	return 0;

free_metadata_freelist:
	kvfree(metadata_freelist->data);
free_cqe_list_nodes:
	kvfree(cqe_list->nodes);
free_cqe_list:
	kvfree(cqe_list);
	return -ENOMEM;
}

static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
{
	int idx;

	for (idx = 0; idx < map->capacity; ++idx) {
		struct sk_buff *skb = map->data[idx];

		dev_kfree_skb_any(skb);
	}
}

static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
{
	mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
	kvfree(ptpsq->metadata_map.data);
	kvfree(ptpsq->metadata_freelist.data);
	kvfree(ptpsq->ts_cqe_pending_list->nodes);
	kvfree(ptpsq->ts_cqe_pending_list);
}

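/* Opening a PTP txqsq also creates the HW SQ with ts_cqe_to_dest_cqn pointing
 * at the port-timestamp CQ, so port timestamps are delivered on a CQ separate
 * from the regular TX completions.
 */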
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn = tisn;
	csp.tis_lst_sz = 1;
	csp.cqn = txqsq->cq.mcq.cqn;
	csp.wq_ctrl = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}

static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	mlx5e_ptp_free_traffic_db(ptpsq);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}

static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	u8 num_tc = mlx5e_get_dcb_num_tc(params);
	int ix_base;
	int err;
	int tc;

	ix_base = num_tc * params->num_channels;

	for (tc = 0; tc < num_tc; tc++) {
		int txq_ix = ix_base + tc;

		err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
					   cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}

static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}

static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}

static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
				struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	struct mlx5e_cq *cq = &c->rq.cq;

	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->rq_param.cqp;

	return mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
}

static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}

static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

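/* The PTP RQ always uses a cyclic (non-striding) WQ and is sized for the
 * netdev's maximal MTU, independently of the currently configured MTU.
 */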
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     u16 q_counter,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, q_counter, rq_params);
}

static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size =
			min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
			    MLX5E_PTP_MAX_LOG_SQ_SIZE);
		params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}

	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, c->priv->q_counter, cparams);
	}
}

static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = priv->netdev;
	rq->priv = priv;
	rq->clock = &mdev->clock;
	rq->tstamp = &priv->tstamp;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats = &c->priv->ptp_stats.rq;
	rq->ix = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}

static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_param)
{
	int node = dev_to_node(c->mdev->device);
	int err;

	err = mlx5e_init_ptp_rq(c, params, &c->rq);
	if (err)
		return err;

	return mlx5e_open_rq(params, rq_param, NULL, node, &c->rq);
}

static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}

static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}

static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
{
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
		__set_bit(MLX5E_PTP_STATE_TX, c->state);

	if (params->ptp_rx)
		__set_bit(MLX5E_PTP_STATE_RX, c->state);

	return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}

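/* RX flow steering: redirect PTP traffic (PTP event UDP and the IEEE 1588 L2
 * ethertype) to the TIR of the dedicated PTP RQ.
 */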
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(fs);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(fs);
	ptp_fs->valid = false;
}

static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_flow_steering *fs = priv->fs;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ptp_fs *ptp_fs;
	int err;

	ptp_fs = mlx5e_fs_get_ptp(fs);
	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(fs);
	if (err)
		return err;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(fs);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(fs);

	return err;
}

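/* Open the PTP channel: decide TX/RX state from the current params, register
 * the NAPI context and open the queues it will poll.
 */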
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		err = -ENOMEM;
		goto err_free;
	}

	c->priv = priv;
	c->mdev = priv->mdev;
	c->tstamp = &priv->tstamp;
	c->pdev = mlx5_core_dma_dev(priv->mdev);
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc = mlx5e_get_dcb_num_tc(params);
	c->stats = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}

void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}

void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
	}
	mlx5e_trigger_napi_sched(&c->napi);
}

void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_deactivate_rq(&c->rq);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable(&c->napi);
}

int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
{
	if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
		return -EINVAL;

	*rqn = c->rq.rqn;
	return 0;
}

int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs;

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return 0;

	ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
	if (!ptp_fs)
		return -ENOMEM;
	mlx5e_fs_set_ptp(fs, ptp_fs);

	return 0;
}

void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return;

	mlx5e_ptp_rx_unset_fs(fs);
	kfree(ptp_fs);
}

int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}

	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv->fs);
	return 0;
}