2 * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include "lib/events.h"
35 #include "en_accel/ktls.h"
36 #include "en_accel/en_accel.h"
40 #ifdef CONFIG_PAGE_POOL_STATS
41 #include <net/page_pool/helpers.h>
44 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
46 return !priv->profile->stats_grps_num ? 0 :
47 priv->profile->stats_grps_num(priv);
50 unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv)
52 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
53 const unsigned int num_stats_grps = stats_grps_num(priv);
54 unsigned int total = 0;
57 for (i = 0; i < num_stats_grps; i++)
58 total += stats_grps[i]->get_num_stats(priv);
63 void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv)
65 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
66 const unsigned int num_stats_grps = stats_grps_num(priv);
69 for (i = num_stats_grps - 1; i >= 0; i--)
70 if (stats_grps[i]->update_stats &&
71 stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS)
72 stats_grps[i]->update_stats(priv);
75 void mlx5e_stats_update(struct mlx5e_priv *priv)
77 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
78 const unsigned int num_stats_grps = stats_grps_num(priv);
81 for (i = num_stats_grps - 1; i >= 0; i--)
82 if (stats_grps[i]->update_stats)
83 stats_grps[i]->update_stats(priv);
86 void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
88 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
89 const unsigned int num_stats_grps = stats_grps_num(priv);
92 for (i = 0; i < num_stats_grps; i++)
93 idx = stats_grps[i]->fill_stats(priv, data, idx);
96 void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
98 mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
99 const unsigned int num_stats_grps = stats_grps_num(priv);
102 for (i = 0; i < num_stats_grps; i++)
103 idx = stats_grps[i]->fill_strings(priv, data, idx);
106 /* Concrete NIC Stats */
108 static const struct counter_desc sw_stats_desc[] = {
109 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
110 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
111 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
112 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
113 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
114 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
115 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
116 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
117 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
118 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
119 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
120 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
122 #ifdef CONFIG_MLX5_EN_TLS
123 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
124 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
125 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
126 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
127 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
128 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
129 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
130 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
131 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
134 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
135 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
136 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
137 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
138 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
139 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
140 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
141 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
142 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
143 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
144 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
145 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
146 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
147 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
148 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
149 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
150 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
151 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
152 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
153 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
154 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_nops) },
155 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
156 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
157 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
158 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
159 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
160 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
161 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
162 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
163 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
164 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
165 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
166 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
167 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
168 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
169 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
170 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
171 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_nops) },
172 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
173 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
174 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
175 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
176 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
177 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
178 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
179 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
180 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
181 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
182 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
183 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
184 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
185 #ifdef CONFIG_PAGE_POOL_STATS
186 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) },
187 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) },
188 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) },
189 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_empty) },
190 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_refill) },
191 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_waive) },
192 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cached) },
193 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_cache_full) },
194 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) },
195 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) },
196 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) },
198 #ifdef CONFIG_MLX5_EN_TLS
199 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
200 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
201 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
202 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
203 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
204 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
205 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
206 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_retry) },
207 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
208 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
210 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
211 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
212 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
213 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_aff_change) },
214 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_force_irq) },
215 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) },
216 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_packets) },
217 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_bytes) },
218 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_complete) },
219 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary) },
220 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_unnecessary_inner) },
221 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) },
222 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) },
223 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) },
224 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) },
225 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) },
226 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) },
227 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_cqes) },
228 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_mpwqe_filler_strides) },
229 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_oversize_pkts_sw_drop) },
230 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_buff_alloc_err) },
231 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_blks) },
232 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_cqe_compress_pkts) },
233 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_congst_umr) },
234 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_arfs_err) },
235 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_xmit) },
236 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_mpwqe) },
237 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_inlnw) },
238 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_full) },
239 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_err) },
240 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xsk_cqes) },
243 #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
245 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw)
247 return NUM_SW_COUNTERS;
250 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
254 for (i = 0; i < NUM_SW_COUNTERS; i++)
255 strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
259 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
263 for (i = 0; i < NUM_SW_COUNTERS; i++)
264 data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
268 static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
269 struct mlx5e_xdpsq_stats *xdpsq_red_stats)
271 s->tx_xdp_xmit += xdpsq_red_stats->xmit;
272 s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
273 s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
274 s->tx_xdp_nops += xdpsq_red_stats->nops;
275 s->tx_xdp_full += xdpsq_red_stats->full;
276 s->tx_xdp_err += xdpsq_red_stats->err;
277 s->tx_xdp_cqes += xdpsq_red_stats->cqes;
280 static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
281 struct mlx5e_xdpsq_stats *xdpsq_stats)
283 s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
284 s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
285 s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
286 s->rx_xdp_tx_nops += xdpsq_stats->nops;
287 s->rx_xdp_tx_full += xdpsq_stats->full;
288 s->rx_xdp_tx_err += xdpsq_stats->err;
289 s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
292 static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
293 struct mlx5e_xdpsq_stats *xsksq_stats)
295 s->tx_xsk_xmit += xsksq_stats->xmit;
296 s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
297 s->tx_xsk_inlnw += xsksq_stats->inlnw;
298 s->tx_xsk_full += xsksq_stats->full;
299 s->tx_xsk_err += xsksq_stats->err;
300 s->tx_xsk_cqes += xsksq_stats->cqes;
303 static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
304 struct mlx5e_rq_stats *xskrq_stats)
306 s->rx_xsk_packets += xskrq_stats->packets;
307 s->rx_xsk_bytes += xskrq_stats->bytes;
308 s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
309 s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
310 s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
311 s->rx_xsk_csum_none += xskrq_stats->csum_none;
312 s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
313 s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
314 s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
315 s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
316 s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
317 s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
318 s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
319 s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
320 s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
321 s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
322 s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
323 s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
324 s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
327 static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
328 struct mlx5e_rq_stats *rq_stats)
330 s->rx_packets += rq_stats->packets;
331 s->rx_bytes += rq_stats->bytes;
332 s->rx_lro_packets += rq_stats->lro_packets;
333 s->rx_lro_bytes += rq_stats->lro_bytes;
334 s->rx_gro_packets += rq_stats->gro_packets;
335 s->rx_gro_bytes += rq_stats->gro_bytes;
336 s->rx_gro_skbs += rq_stats->gro_skbs;
337 s->rx_gro_match_packets += rq_stats->gro_match_packets;
338 s->rx_gro_large_hds += rq_stats->gro_large_hds;
339 s->rx_ecn_mark += rq_stats->ecn_mark;
340 s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
341 s->rx_csum_none += rq_stats->csum_none;
342 s->rx_csum_complete += rq_stats->csum_complete;
343 s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
344 s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
345 s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
346 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
347 s->rx_xdp_drop += rq_stats->xdp_drop;
348 s->rx_xdp_redirect += rq_stats->xdp_redirect;
349 s->rx_wqe_err += rq_stats->wqe_err;
350 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
351 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
352 s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
353 s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
354 s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
355 s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
356 s->rx_congst_umr += rq_stats->congst_umr;
357 s->rx_arfs_err += rq_stats->arfs_err;
358 s->rx_recover += rq_stats->recover;
359 #ifdef CONFIG_PAGE_POOL_STATS
360 s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast;
361 s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow;
362 s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty;
363 s->rx_pp_alloc_refill += rq_stats->pp_alloc_refill;
364 s->rx_pp_alloc_waive += rq_stats->pp_alloc_waive;
365 s->rx_pp_alloc_slow_high_order += rq_stats->pp_alloc_slow_high_order;
366 s->rx_pp_recycle_cached += rq_stats->pp_recycle_cached;
367 s->rx_pp_recycle_cache_full += rq_stats->pp_recycle_cache_full;
368 s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring;
369 s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full;
370 s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref;
372 #ifdef CONFIG_MLX5_EN_TLS
373 s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
374 s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes;
375 s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt;
376 s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start;
377 s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end;
378 s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip;
379 s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok;
380 s->rx_tls_resync_res_retry += rq_stats->tls_resync_res_retry;
381 s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
382 s->rx_tls_err += rq_stats->tls_err;
386 static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
387 struct mlx5e_ch_stats *ch_stats)
389 s->ch_events += ch_stats->events;
390 s->ch_poll += ch_stats->poll;
391 s->ch_arm += ch_stats->arm;
392 s->ch_aff_change += ch_stats->aff_change;
393 s->ch_force_irq += ch_stats->force_irq;
394 s->ch_eq_rearm += ch_stats->eq_rearm;
397 static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
398 struct mlx5e_sq_stats *sq_stats)
400 s->tx_packets += sq_stats->packets;
401 s->tx_bytes += sq_stats->bytes;
402 s->tx_tso_packets += sq_stats->tso_packets;
403 s->tx_tso_bytes += sq_stats->tso_bytes;
404 s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
405 s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
406 s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
407 s->tx_nop += sq_stats->nop;
408 s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
409 s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
410 s->tx_queue_stopped += sq_stats->stopped;
411 s->tx_queue_wake += sq_stats->wake;
412 s->tx_queue_dropped += sq_stats->dropped;
413 s->tx_cqe_err += sq_stats->cqe_err;
414 s->tx_recover += sq_stats->recover;
415 s->tx_xmit_more += sq_stats->xmit_more;
416 s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
417 s->tx_csum_none += sq_stats->csum_none;
418 s->tx_csum_partial += sq_stats->csum_partial;
419 #ifdef CONFIG_MLX5_EN_TLS
420 s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
421 s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
422 s->tx_tls_ooo += sq_stats->tls_ooo;
423 s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
424 s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
425 s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
426 s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
427 s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
428 s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
430 s->tx_cqes += sq_stats->cqes;
433 static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
434 struct mlx5e_sw_stats *s)
438 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
441 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
443 if (priv->tx_ptp_opened) {
444 for (i = 0; i < priv->max_opened_tc; i++) {
445 mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
447 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
451 if (priv->rx_ptp_opened) {
452 mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
454 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
459 static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
460 struct mlx5e_sw_stats *s)
462 struct mlx5e_sq_stats **stats;
466 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
467 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
468 stats = READ_ONCE(priv->htb_qos_sq_stats);
470 for (i = 0; i < max_qos_sqs; i++) {
471 mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
473 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
#ifdef CONFIG_PAGE_POOL_STATS
/* Snapshot page_pool counters into the channel's RQ stats. Values are
 * assigned (not accumulated): page_pool_get_stats reports totals.
 */
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
	struct mlx5e_rq_stats *rq_stats = c->rq.stats;
	struct page_pool *pool = c->rq.page_pool;
	struct page_pool_stats stats = { 0 };

	if (!page_pool_get_stats(pool, &stats))
		return;

	rq_stats->pp_alloc_fast = stats.alloc_stats.fast;
	rq_stats->pp_alloc_slow = stats.alloc_stats.slow;
	rq_stats->pp_alloc_slow_high_order = stats.alloc_stats.slow_high_order;
	rq_stats->pp_alloc_empty = stats.alloc_stats.empty;
	rq_stats->pp_alloc_waive = stats.alloc_stats.waive;
	rq_stats->pp_alloc_refill = stats.alloc_stats.refill;

	rq_stats->pp_recycle_cached = stats.recycle_stats.cached;
	rq_stats->pp_recycle_cache_full = stats.recycle_stats.cache_full;
	rq_stats->pp_recycle_ring = stats.recycle_stats.ring;
	rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full;
	rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt;
}
#else
static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c)
{
}
#endif
507 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
509 struct mlx5e_sw_stats *s = &priv->stats.sw;
512 memset(s, 0, sizeof(*s));
514 for (i = 0; i < priv->channels.num; i++) /* for active channels only */
515 mlx5e_stats_update_stats_rq_page_pool(priv->channels.c[i]);
517 for (i = 0; i < priv->stats_nch; i++) {
518 struct mlx5e_channel_stats *channel_stats =
519 priv->channel_stats[i];
523 mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
524 mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
525 mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
527 mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
528 /* AF_XDP zero-copy */
529 mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
530 mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
532 for (j = 0; j < priv->max_opened_tc; j++) {
533 mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
535 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
539 mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
540 mlx5e_stats_grp_sw_update_stats_qos(priv, s);
543 static const struct counter_desc q_stats_desc[] = {
544 { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
547 static const struct counter_desc drop_rq_stats_desc[] = {
548 { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_if_down_packets) },
551 #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
552 #define NUM_DROP_RQ_COUNTERS ARRAY_SIZE(drop_rq_stats_desc)
554 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qcnt)
559 num_stats += NUM_Q_COUNTERS;
561 if (priv->drop_rq_q_counter)
562 num_stats += NUM_DROP_RQ_COUNTERS;
567 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
571 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
572 strcpy(data + (idx++) * ETH_GSTRING_LEN,
573 q_stats_desc[i].format);
575 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
576 strcpy(data + (idx++) * ETH_GSTRING_LEN,
577 drop_rq_stats_desc[i].format);
582 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
586 for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
587 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
589 for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
590 data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
591 drop_rq_stats_desc, i);
595 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
597 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
598 u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
599 u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
602 MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
604 if (priv->q_counter) {
605 MLX5_SET(query_q_counter_in, in, counter_set_id,
607 ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
609 qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out,
613 if (priv->drop_rq_q_counter) {
614 MLX5_SET(query_q_counter_in, in, counter_set_id,
615 priv->drop_rq_q_counter);
616 ret = mlx5_cmd_exec_inout(priv->mdev, query_q_counter, in, out);
618 qcnt->rx_if_down_packets = MLX5_GET(query_q_counter_out,
623 #define VNIC_ENV_OFF(c) MLX5_BYTE_OFF(query_vnic_env_out, c)
624 static const struct counter_desc vnic_env_stats_steer_desc[] = {
625 { "rx_steer_missed_packets",
626 VNIC_ENV_OFF(vport_env.nic_receive_steering_discard) },
629 static const struct counter_desc vnic_env_stats_dev_oob_desc[] = {
630 { "dev_internal_queue_oob",
631 VNIC_ENV_OFF(vport_env.internal_rq_out_of_buffer) },
634 static const struct counter_desc vnic_env_stats_drop_desc[] = {
635 { "rx_oversize_pkts_buffer",
636 VNIC_ENV_OFF(vport_env.eth_wqe_too_small) },
639 #define NUM_VNIC_ENV_STEER_COUNTERS(dev) \
640 (MLX5_CAP_GEN(dev, nic_receive_steering_discard) ? \
641 ARRAY_SIZE(vnic_env_stats_steer_desc) : 0)
642 #define NUM_VNIC_ENV_DEV_OOB_COUNTERS(dev) \
643 (MLX5_CAP_GEN(dev, vnic_env_int_rq_oob) ? \
644 ARRAY_SIZE(vnic_env_stats_dev_oob_desc) : 0)
645 #define NUM_VNIC_ENV_DROP_COUNTERS(dev) \
646 (MLX5_CAP_GEN(dev, eth_wqe_too_small) ? \
647 ARRAY_SIZE(vnic_env_stats_drop_desc) : 0)
649 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vnic_env)
651 return NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev) +
652 NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev) +
653 NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev);
656 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
660 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
661 strcpy(data + (idx++) * ETH_GSTRING_LEN,
662 vnic_env_stats_steer_desc[i].format);
664 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
665 strcpy(data + (idx++) * ETH_GSTRING_LEN,
666 vnic_env_stats_dev_oob_desc[i].format);
668 for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
669 strcpy(data + (idx++) * ETH_GSTRING_LEN,
670 vnic_env_stats_drop_desc[i].format);
675 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
679 for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
680 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
681 vnic_env_stats_steer_desc, i);
683 for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
684 data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
685 vnic_env_stats_dev_oob_desc, i);
687 for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
688 data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
689 vnic_env_stats_drop_desc, i);
694 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
696 u32 *out = (u32 *)priv->stats.vnic.query_vnic_env_out;
697 u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
698 struct mlx5_core_dev *mdev = priv->mdev;
700 if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
703 MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
704 mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
707 #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
708 static const struct counter_desc vport_stats_desc[] = {
709 { "rx_vport_unicast_packets",
710 VPORT_COUNTER_OFF(received_eth_unicast.packets) },
711 { "rx_vport_unicast_bytes",
712 VPORT_COUNTER_OFF(received_eth_unicast.octets) },
713 { "tx_vport_unicast_packets",
714 VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
715 { "tx_vport_unicast_bytes",
716 VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
717 { "rx_vport_multicast_packets",
718 VPORT_COUNTER_OFF(received_eth_multicast.packets) },
719 { "rx_vport_multicast_bytes",
720 VPORT_COUNTER_OFF(received_eth_multicast.octets) },
721 { "tx_vport_multicast_packets",
722 VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
723 { "tx_vport_multicast_bytes",
724 VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
725 { "rx_vport_broadcast_packets",
726 VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
727 { "rx_vport_broadcast_bytes",
728 VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
729 { "tx_vport_broadcast_packets",
730 VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
731 { "tx_vport_broadcast_bytes",
732 VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
733 { "rx_vport_rdma_unicast_packets",
734 VPORT_COUNTER_OFF(received_ib_unicast.packets) },
735 { "rx_vport_rdma_unicast_bytes",
736 VPORT_COUNTER_OFF(received_ib_unicast.octets) },
737 { "tx_vport_rdma_unicast_packets",
738 VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
739 { "tx_vport_rdma_unicast_bytes",
740 VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
741 { "rx_vport_rdma_multicast_packets",
742 VPORT_COUNTER_OFF(received_ib_multicast.packets) },
743 { "rx_vport_rdma_multicast_bytes",
744 VPORT_COUNTER_OFF(received_ib_multicast.octets) },
745 { "tx_vport_rdma_multicast_packets",
746 VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
747 { "tx_vport_rdma_multicast_bytes",
748 VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
751 static const struct counter_desc vport_loopback_stats_desc[] = {
752 { "vport_loopback_packets",
753 VPORT_COUNTER_OFF(local_loopback.packets) },
754 { "vport_loopback_bytes",
755 VPORT_COUNTER_OFF(local_loopback.octets) },
758 #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
759 #define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
760 (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
761 ARRAY_SIZE(vport_loopback_stats_desc) : 0)
763 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
765 return NUM_VPORT_COUNTERS +
766 NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
769 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
773 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
774 strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
776 for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
777 strcpy(data + (idx++) * ETH_GSTRING_LEN,
778 vport_loopback_stats_desc[i].format);
783 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
787 for (i = 0; i < NUM_VPORT_COUNTERS; i++)
788 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
789 vport_stats_desc, i);
791 for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
792 data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
793 vport_loopback_stats_desc, i);
798 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
800 u32 *out = (u32 *)priv->stats.vport.query_vport_out;
801 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
802 struct mlx5_core_dev *mdev = priv->mdev;
804 MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
805 mlx5_cmd_exec_inout(mdev, query_vport_counter, in, out);
/* ---- IEEE 802.3 port counters group (PPCNT, group 0) ----
 * Each counter lives at the byte offset of its <name>_high field inside the
 * eth_802_3_cntrs_grp_data_layout counter set.
 */
808 #define PPORT_802_3_OFF(c) \
809 MLX5_BYTE_OFF(ppcnt_reg, \
810 counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
811 static const struct counter_desc pport_802_3_stats_desc[] = {
812 { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
813 { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
814 { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
815 { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
816 { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
817 { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
818 { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
819 { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
820 { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
821 { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
822 { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
823 { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
824 { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
825 { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
826 { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
827 { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
828 { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
829 { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
832 #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
/* Fixed-size group: every 802.3 counter is always reported. */
834 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(802_3)
836 return NUM_PPORT_802_3_COUNTERS;
839 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
843 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
844 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
848 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
852 for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
853 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
854 pport_802_3_stats_desc, i);
/* Basic PPCNT groups are supported either implicitly (no PCAM register) or
 * when the PCAM explicitly reports the ppcnt register. */
858 #define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
859 (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
/* Refresh the cached IEEE 802.3 counters by reading PPCNT group 0. */
861 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
863 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
864 struct mlx5_core_dev *mdev = priv->mdev;
865 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
866 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
869 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
872 MLX5_SET(ppcnt_reg, in, local_port, 1);
873 out = pstats->IEEE_802_3_counters;
874 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
875 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
/* Read one 64-bit big-endian counter from a raw PPCNT buffer, addressed by
 * counter-set layout and field name (offset of the field's _high word). */
878 #define MLX5E_READ_CTR64_BE_F(ptr, set, c) \
879 be64_to_cpu(*(__be64 *)((char *)ptr + \
880 MLX5_BYTE_OFF(ppcnt_reg, \
881 counter_set.set.c##_high)))
/* Query the IEEE 802.3 PPCNT group into ppcnt_ieee_802_3.
 * Returns 0 on success or the access_reg error; bails out early when basic
 * PPCNT groups are not supported. */
883 static int mlx5e_stats_get_ieee(struct mlx5_core_dev *mdev,
884 u32 *ppcnt_ieee_802_3)
886 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
887 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
889 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
892 MLX5_SET(ppcnt_reg, in, local_port, 1);
893 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
894 return mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
895 sz, MLX5_REG_PPCNT, 0, 0);
/* ethtool standard pause stats: map the IEEE 802.3 pause MAC control frame
 * counters into struct ethtool_pause_stats. Leaves the stats untouched if
 * the PPCNT query fails. */
898 void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
899 struct ethtool_pause_stats *pause_stats)
901 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
902 struct mlx5_core_dev *mdev = priv->mdev;
904 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
907 pause_stats->tx_pause_frames =
908 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
909 eth_802_3_cntrs_grp_data_layout,
910 a_pause_mac_ctrl_frames_transmitted);
911 pause_stats->rx_pause_frames =
912 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
913 eth_802_3_cntrs_grp_data_layout,
914 a_pause_mac_ctrl_frames_received);
/* ethtool standard PHY stats: only SymbolErrorDuringCarrier is available,
 * taken from the IEEE 802.3 PPCNT group. No-op on query failure. */
917 void mlx5e_stats_eth_phy_get(struct mlx5e_priv *priv,
918 struct ethtool_eth_phy_stats *phy_stats)
920 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
921 struct mlx5_core_dev *mdev = priv->mdev;
923 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
926 phy_stats->SymbolErrorDuringCarrier =
927 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
928 eth_802_3_cntrs_grp_data_layout,
929 a_symbol_error_during_carrier);
/* ethtool standard MAC stats: copy the IEEE 802.3 aFrames*/aOctets* family
 * of counters into struct ethtool_eth_mac_stats. The RD() helper macro
 * (defined just below, and presumably #undef'd after use - elided from this
 * listing) reads one named field from the queried buffer. No-op on query
 * failure. */
932 void mlx5e_stats_eth_mac_get(struct mlx5e_priv *priv,
933 struct ethtool_eth_mac_stats *mac_stats)
935 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
936 struct mlx5_core_dev *mdev = priv->mdev;
938 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
942 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3, \
943 eth_802_3_cntrs_grp_data_layout, \
946 mac_stats->FramesTransmittedOK = RD(a_frames_transmitted_ok);
947 mac_stats->FramesReceivedOK = RD(a_frames_received_ok);
948 mac_stats->FrameCheckSequenceErrors = RD(a_frame_check_sequence_errors);
949 mac_stats->OctetsTransmittedOK = RD(a_octets_transmitted_ok);
950 mac_stats->OctetsReceivedOK = RD(a_octets_received_ok);
951 mac_stats->MulticastFramesXmittedOK = RD(a_multicast_frames_xmitted_ok);
952 mac_stats->BroadcastFramesXmittedOK = RD(a_broadcast_frames_xmitted_ok);
953 mac_stats->MulticastFramesReceivedOK = RD(a_multicast_frames_received_ok);
954 mac_stats->BroadcastFramesReceivedOK = RD(a_broadcast_frames_received_ok);
955 mac_stats->InRangeLengthErrors = RD(a_in_range_length_errors);
956 mac_stats->OutOfRangeLengthField = RD(a_out_of_range_length_field);
957 mac_stats->FrameTooLongErrors = RD(a_frame_too_long_errors);
/* ethtool standard MAC control stats: MAC control frames tx/rx and
 * unsupported opcodes, all from the IEEE 802.3 PPCNT group. No-op on
 * query failure. */
961 void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
962 struct ethtool_eth_ctrl_stats *ctrl_stats)
964 u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
965 struct mlx5_core_dev *mdev = priv->mdev;
967 if (mlx5e_stats_get_ieee(mdev, ppcnt_ieee_802_3))
970 ctrl_stats->MACControlFramesTransmitted =
971 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
972 eth_802_3_cntrs_grp_data_layout,
973 a_mac_control_frames_transmitted);
974 ctrl_stats->MACControlFramesReceived =
975 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
976 eth_802_3_cntrs_grp_data_layout,
977 a_mac_control_frames_received);
978 ctrl_stats->UnsupportedOpcodesReceived =
979 MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
980 eth_802_3_cntrs_grp_data_layout,
981 a_unsupported_opcodes_received);
/* ---- RFC 2863 (interfaces MIB) port counters group ---- */
984 #define PPORT_2863_OFF(c) \
985 MLX5_BYTE_OFF(ppcnt_reg, \
986 counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
987 static const struct counter_desc pport_2863_stats_desc[] = {
988 { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
989 { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
990 { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
993 #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
/* Fixed-size group: always reported. */
995 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2863)
997 return NUM_PPORT_2863_COUNTERS;
1000 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
1004 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
1005 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
1009 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
1013 for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
1014 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
1015 pport_2863_stats_desc, i);
/* Refresh the cached RFC 2863 counters via PPCNT.
 * NOTE(review): unlike the 802.3/2819 update ops, there is no
 * MLX5_BASIC_PPCNT_SUPPORTED gate visible here - confirm against the full
 * source whether that is intentional. */
1019 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
1021 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1022 struct mlx5_core_dev *mdev = priv->mdev;
1023 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1024 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1027 MLX5_SET(ppcnt_reg, in, local_port, 1);
1028 out = pstats->RFC_2863_counters;
1029 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
1030 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
/* ---- RFC 2819 (RMON MIB) port counters group: undersize/fragment/jabber
 * plus the rx packet size histogram buckets. ---- */
1033 #define PPORT_2819_OFF(c) \
1034 MLX5_BYTE_OFF(ppcnt_reg, \
1035 counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
1036 static const struct counter_desc pport_2819_stats_desc[] = {
1037 { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
1038 { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
1039 { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
1040 { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
1041 { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
1042 { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
1043 { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
1044 { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
1045 { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
1046 { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
1047 { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
1048 { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
1049 { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
1052 #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
/* Fixed-size group: always reported. */
1054 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(2819)
1056 return NUM_PPORT_2819_COUNTERS;
1059 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
1063 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1064 strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
1068 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
1072 for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
1073 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
1074 pport_2819_stats_desc, i);
/* Refresh the cached RFC 2819 counters via PPCNT; skipped when the device
 * does not support the basic PPCNT groups. */
1078 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
1080 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1081 struct mlx5_core_dev *mdev = priv->mdev;
1082 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1083 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1086 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1089 MLX5_SET(ppcnt_reg, in, local_port, 1);
1090 out = pstats->RFC_2819_counters;
1091 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1092 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
/* Packet-size histogram ranges matching the RFC 2819 buckets below.
 * NOTE(review): the array initializer entries (original lines ~1096-1108)
 * are elided from this listing. */
1095 static const struct ethtool_rmon_hist_range mlx5e_rmon_ranges[] = {
/* ethtool standard RMON stats: query the RFC 2819 PPCNT group directly
 * (not from the cache) and fill counters plus the 10-bucket rx histogram.
 * On query failure the stats are left untouched. */
1109 void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
1110 struct ethtool_rmon_stats *rmon,
1111 const struct ethtool_rmon_hist_range **ranges)
1113 u32 ppcnt_RFC_2819_counters[MLX5_ST_SZ_DW(ppcnt_reg)];
1114 struct mlx5_core_dev *mdev = priv->mdev;
1115 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1116 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1118 MLX5_SET(ppcnt_reg, in, local_port, 1);
1119 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
1120 if (mlx5_core_access_reg(mdev, in, sz, ppcnt_RFC_2819_counters,
1121 sz, MLX5_REG_PPCNT, 0, 0))
/* RD() reads one eth_2819 layout field from the freshly queried buffer. */
1125 MLX5E_READ_CTR64_BE_F(ppcnt_RFC_2819_counters, \
1126 eth_2819_cntrs_grp_data_layout, \
1129 rmon->undersize_pkts = RD(ether_stats_undersize_pkts);
1130 rmon->fragments = RD(ether_stats_fragments);
1131 rmon->jabbers = RD(ether_stats_jabbers);
1133 rmon->hist[0] = RD(ether_stats_pkts64octets);
1134 rmon->hist[1] = RD(ether_stats_pkts65to127octets);
1135 rmon->hist[2] = RD(ether_stats_pkts128to255octets);
1136 rmon->hist[3] = RD(ether_stats_pkts256to511octets);
1137 rmon->hist[4] = RD(ether_stats_pkts512to1023octets);
1138 rmon->hist[5] = RD(ether_stats_pkts1024to1518octets);
1139 rmon->hist[6] = RD(ether_stats_pkts1519to2047octets);
1140 rmon->hist[7] = RD(ether_stats_pkts2048to4095octets);
1141 rmon->hist[8] = RD(ether_stats_pkts4096to8191octets);
1142 rmon->hist[9] = RD(ether_stats_pkts8192to10239octets);
1145 *ranges = mlx5e_rmon_ranges;
/* ---- physical-layer counters group: link_down_events plus the optional
 * statistical counters (symbol errors, corrected bits, per-lane errors). ---- */
1148 #define PPORT_PHY_STATISTICAL_OFF(c) \
1149 MLX5_BYTE_OFF(ppcnt_reg, \
1150 counter_set.phys_layer_statistical_cntrs.c##_high)
1151 static const struct counter_desc pport_phy_statistical_stats_desc[] = {
1152 { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
1153 { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
1156 static const struct counter_desc
1157 pport_phy_statistical_err_lanes_stats_desc[] = {
1158 { "rx_err_lane_0_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane0) },
1159 { "rx_err_lane_1_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane1) },
1160 { "rx_err_lane_2_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane2) },
1161 { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) },
1164 #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \
1165 ARRAY_SIZE(pport_phy_statistical_stats_desc)
1166 #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \
1167 ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc)
/* Count: 1 (link_down_events) + statistical group counters if the PCAM
 * reports ppcnt_statistical_group + per-lane counters if it reports
 * per_lane_error_counters. */
1169 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy)
1171 struct mlx5_core_dev *mdev = priv->mdev;
1174 /* "1" for link_down_events special counter */
1177 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ?
1178 NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0;
1180 num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ?
1181 NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0;
/* String names in the same order as the num_stats/fill_stats ops. */
1186 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
1188 struct mlx5_core_dev *mdev = priv->mdev;
1191 strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
1193 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1196 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1197 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1198 pport_phy_statistical_stats_desc[i].format);
1200 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1201 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1202 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1203 pport_phy_statistical_err_lanes_stats_desc[i].format);
1208 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
1210 struct mlx5_core_dev *mdev = priv->mdev;
1213 /* link_down_events_phy has special handling since it is not stored in __be64 format */
1214 data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
1215 counter_set.phys_layer_cntrs.link_down_events);
1217 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1220 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
1222 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1223 pport_phy_statistical_stats_desc, i);
1225 if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
1226 for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
1228 MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
1229 pport_phy_statistical_err_lanes_stats_desc,
/* Refresh both cached buffers: the plain physical-layer counters always,
 * the statistical group only when the PCAM advertises it. The same "in"
 * mailbox is reused with only the grp field changed. */
1234 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
1236 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1237 struct mlx5_core_dev *mdev = priv->mdev;
1238 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1239 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1242 MLX5_SET(ppcnt_reg, in, local_port, 1);
1243 out = pstats->phy_counters;
1244 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1245 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
1247 if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
1250 out = pstats->phy_statistical_counters;
1251 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1252 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
/* ethtool link extended stats: query the physical-layer PPCNT group
 * directly and report link_down_events (a plain 32-bit field, read with
 * MLX5_GET rather than the __be64 helpers). The access_reg return value
 * is ignored; "out" is zero-initialized so a failed query yields 0. */
1255 void mlx5e_get_link_ext_stats(struct net_device *dev,
1256 struct ethtool_link_ext_stats *stats)
1258 struct mlx5e_priv *priv = netdev_priv(dev);
1259 u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1260 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1261 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1263 MLX5_SET(ppcnt_reg, in, local_port, 1);
1264 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1265 mlx5_core_access_reg(priv->mdev, in, sz, out,
1266 MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);
1268 stats->link_down_events = MLX5_GET(ppcnt_reg, out,
1269 counter_set.phys_layer_cntrs.link_down_events);
/* Number of physical lanes on port 1, read from the PMLP register's width
 * field. Error handling of the access_reg call (original lines elided here)
 * presumably returns the error before the MLX5_GET - confirm in full source. */
1272 static int fec_num_lanes(struct mlx5_core_dev *dev)
1274 u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1275 u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {};
1278 MLX5_SET(pmlp_reg, in, local_port, 1);
1279 err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
1280 MLX5_REG_PMLP, 0, 0);
1284 return MLX5_GET(pmlp_reg, out, width);
/* Resolve the currently active FEC mode: the active-FEC bitmask has one
 * bit set, so find_first_bit yields the mode index. Falls back to
 * MLX5E_FEC_NOFEC when the query fails. */
1287 static int fec_active_mode(struct mlx5_core_dev *mdev)
1289 unsigned long fec_active_long;
1292 if (mlx5e_get_fec_mode(mdev, &fec_active, NULL))
1293 return MLX5E_FEC_NOFEC;
1295 fec_active_long = fec_active;
1296 return find_first_bit(&fec_active_long, sizeof(unsigned long) * BITS_PER_BYTE);
/* Fill corrected/uncorrectable block counters for one Firecode FEC lane.
 * A macro (not a function) because the lane index is token-pasted into the
 * phys_layer_cntrs field names. */
1299 #define MLX5E_STATS_SET_FEC_BLOCK(idx) ({ \
1300 fec_stats->corrected_blocks.lanes[(idx)] = \
1301 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1302 fc_fec_corrected_blocks_lane##idx); \
1303 fec_stats->uncorrectable_blocks.lanes[(idx)] = \
1304 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs, \
1305 fc_fec_uncorrectable_blocks_lane##idx); \
/* Firecode FEC: per-lane block counters. Lane counts fall through: 4-lane
 * links fill lanes 3..0, 2-lane links lanes 1..0, single-lane links lane 0. */
1308 static void fec_set_fc_stats(struct ethtool_fec_stats *fec_stats,
1309 u32 *ppcnt, u8 lanes)
1311 if (lanes > 3) { /* 4 lanes */
1312 MLX5E_STATS_SET_FEC_BLOCK(3);
1313 MLX5E_STATS_SET_FEC_BLOCK(2);
1315 if (lanes > 1) /* 2 lanes */
1316 MLX5E_STATS_SET_FEC_BLOCK(1);
1317 if (lanes > 0) /* 1 lane */
1318 MLX5E_STATS_SET_FEC_BLOCK(0);
/* Reed-Solomon FEC: only aggregate (total) block counters exist. */
1321 static void fec_set_rs_stats(struct ethtool_fec_stats *fec_stats, u32 *ppcnt)
1323 fec_stats->corrected_blocks.total =
1324 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1325 rs_fec_corrected_blocks);
1326 fec_stats->uncorrectable_blocks.total =
1327 MLX5E_READ_CTR64_BE_F(ppcnt, phys_layer_cntrs,
1328 rs_fec_uncorrectable_blocks);
/* Query the physical-layer PPCNT group and dispatch on the active FEC mode:
 * RS variants report aggregate totals, Firecode reports per-lane counters.
 * No-op when FEC is off or the register read fails. */
1331 static void fec_set_block_stats(struct mlx5e_priv *priv,
1332 struct ethtool_fec_stats *fec_stats)
1334 struct mlx5_core_dev *mdev = priv->mdev;
1335 u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1336 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1337 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1338 int mode = fec_active_mode(mdev);
1340 if (mode == MLX5E_FEC_NOFEC)
1343 MLX5_SET(ppcnt_reg, in, local_port, 1);
1344 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
1345 if (mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0))
1349 case MLX5E_FEC_RS_528_514:
1350 case MLX5E_FEC_RS_544_514:
1351 case MLX5E_FEC_LLRS_272_257_1:
1352 fec_set_rs_stats(fec_stats, out);
1354 case MLX5E_FEC_FIRECODE:
1355 fec_set_fc_stats(fec_stats, out, fec_num_lanes(mdev));
/* Fill corrected_bits.total from the physical-layer statistical PPCNT
 * group (phy_corrected_bits). No-op on register read failure. */
1359 static void fec_set_corrected_bits_total(struct mlx5e_priv *priv,
1360 struct ethtool_fec_stats *fec_stats)
1362 u32 ppcnt_phy_statistical[MLX5_ST_SZ_DW(ppcnt_reg)];
1363 struct mlx5_core_dev *mdev = priv->mdev;
1364 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1365 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1367 MLX5_SET(ppcnt_reg, in, local_port, 1);
1368 MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
1369 if (mlx5_core_access_reg(mdev, in, sz, ppcnt_phy_statistical,
1370 sz, MLX5_REG_PPCNT, 0, 0))
1373 fec_stats->corrected_bits.total =
1374 MLX5E_READ_CTR64_BE_F(ppcnt_phy_statistical,
1375 phys_layer_statistical_cntrs,
1376 phy_corrected_bits)#;
/* ethtool standard FEC stats entry point; requires the PCAM statistical
 * group capability, then gathers corrected-bit and block counters. */
1379 void mlx5e_stats_fec_get(struct mlx5e_priv *priv,
1380 struct ethtool_fec_stats *fec_stats)
1382 if (!MLX5_CAP_PCAM_FEATURE(priv->mdev, ppcnt_statistical_group))
1385 fec_set_corrected_bits_total(priv, fec_stats);
1386 fec_set_block_stats(priv, fec_stats);
/* ---- ethernet extended counters group: exposed only when the PCAM
 * reports rx_buffer_fullness_counters. ---- */
1389 #define PPORT_ETH_EXT_OFF(c) \
1390 MLX5_BYTE_OFF(ppcnt_reg, \
1391 counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
1392 static const struct counter_desc pport_eth_ext_stats_desc[] = {
1393 { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
1396 #define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
1398 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(eth_ext)
1400 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1401 return NUM_PPORT_ETH_EXT_COUNTERS;
1406 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
1410 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1411 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1412 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1413 pport_eth_ext_stats_desc[i].format);
1417 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
1421 if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
1422 for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
1424 MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
1425 pport_eth_ext_stats_desc, i);
/* Refresh the cached extended counters via PPCNT when supported. */
1429 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
1431 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1432 struct mlx5_core_dev *mdev = priv->mdev;
1433 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1434 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1437 if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
1440 MLX5_SET(ppcnt_reg, in, local_port, 1);
1441 out = pstats->eth_ext_counters;
1442 MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
1443 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
/* ---- PCIe performance counters group (MPCNT register). Three sub-sets,
 * each gated by its own MCAM feature bit: 32-bit perf counters, the 64-bit
 * tx_overflow_buffer_pkt counter, and the outbound-stall counters. ---- */
1446 #define PCIE_PERF_OFF(c) \
1447 MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
1448 static const struct counter_desc pcie_perf_stats_desc[] = {
1449 { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
1450 { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
/* 64-bit counters address the _high word of the field. */
1453 #define PCIE_PERF_OFF64(c) \
1454 MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
1455 static const struct counter_desc pcie_perf_stats_desc64[] = {
1456 { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
1459 static const struct counter_desc pcie_perf_stall_stats_desc[] = {
1460 { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
1461 { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
1462 { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
1463 { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
1466 #define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
1467 #define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
1468 #define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
/* Sum the sizes of whichever sub-sets the device supports. */
1470 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pcie)
1474 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1475 num_stats += NUM_PCIE_PERF_COUNTERS;
1477 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1478 num_stats += NUM_PCIE_PERF_COUNTERS64;
1480 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1481 num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
1486 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
1490 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1491 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1492 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1493 pcie_perf_stats_desc[i].format);
1495 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1496 for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1497 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1498 pcie_perf_stats_desc64[i].format);
1500 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1501 for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1502 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1503 pcie_perf_stall_stats_desc[i].format);
/* Note the mixed widths: perf/stall counters are 32-bit reads, the
 * overflow counter is a 64-bit read, all from the same cached buffer. */
1507 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
1511 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
1512 for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
1514 MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1515 pcie_perf_stats_desc, i);
1517 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
1518 for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
1520 MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
1521 pcie_perf_stats_desc64, i);
1523 if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
1524 for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
1526 MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
1527 pcie_perf_stall_stats_desc, i);
/* Refresh the cached PCIe counters via MPCNT when supported. */
1531 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
1533 struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
1534 struct mlx5_core_dev *mdev = priv->mdev;
1535 u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
1536 int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
1539 if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
1542 out = pcie_stats->pcie_perf_counters;
1543 MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
1544 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
/* ---- per-TC prio and per-TC congestion counters group. Both sub-groups
 * are gated on the sbcam_reg capability and iterate over NUM_PPORT_PRIO
 * traffic classes, substituting the prio number into "%d" format names. ---- */
1547 #define PPORT_PER_TC_PRIO_OFF(c) \
1548 MLX5_BYTE_OFF(ppcnt_reg, \
1549 counter_set.eth_per_tc_prio_grp_data_layout.c##_high)
1551 static const struct counter_desc pport_per_tc_prio_stats_desc[] = {
1552 { "rx_prio%d_buf_discard", PPORT_PER_TC_PRIO_OFF(no_buffer_discard_uc) },
1555 #define NUM_PPORT_PER_TC_PRIO_COUNTERS ARRAY_SIZE(pport_per_tc_prio_stats_desc)
1557 #define PPORT_PER_TC_CONGEST_PRIO_OFF(c) \
1558 MLX5_BYTE_OFF(ppcnt_reg, \
1559 counter_set.eth_per_tc_congest_prio_grp_data_layout.c##_high)
1561 static const struct counter_desc pport_per_tc_congest_prio_stats_desc[] = {
1562 { "rx_prio%d_cong_discard", PPORT_PER_TC_CONGEST_PRIO_OFF(wred_discard) },
1563 { "rx_prio%d_marked", PPORT_PER_TC_CONGEST_PRIO_OFF(ecn_marked_tc) },
1566 #define NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS \
1567 ARRAY_SIZE(pport_per_tc_congest_prio_stats_desc)
/* Per-TC prio sub-group size; 0 without sbcam_reg. */
1569 static int mlx5e_grp_per_tc_prio_get_num_stats(struct mlx5e_priv *priv)
1571 struct mlx5_core_dev *mdev = priv->mdev;
1573 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1576 return NUM_PPORT_PER_TC_PRIO_COUNTERS * NUM_PPORT_PRIO;
/* Names for both sub-groups, interleaved per prio (prio then congest). */
1579 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
1581 struct mlx5_core_dev *mdev = priv->mdev;
1584 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1587 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1588 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1589 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1590 pport_per_tc_prio_stats_desc[i].format, prio);
1591 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
1592 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1593 pport_per_tc_congest_prio_stats_desc[i].format, prio);
/* Values in the same interleaved order as the strings above. */
1599 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
1601 struct mlx5e_pport_stats *pport = &priv->stats.pport;
1602 struct mlx5_core_dev *mdev = priv->mdev;
1605 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1608 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1609 for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
1611 MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
1612 pport_per_tc_prio_stats_desc, i);
1613 for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
1615 MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
1616 pport_per_tc_congest_prio_stats_desc, i);
/* Refresh per-TC prio counters: one PPCNT read per prio (pnat=2 selects
 * the per-traffic-class view), caching into per_tc_prio_counters[prio]. */
1622 static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
1624 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1625 struct mlx5_core_dev *mdev = priv->mdev;
1626 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1627 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1631 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1634 MLX5_SET(ppcnt_reg, in, pnat, 2);
1635 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP);
1636 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1637 out = pstats->per_tc_prio_counters[prio];
1638 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1639 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
/* Per-TC congestion sub-group size; 0 without sbcam_reg. */
1643 static int mlx5e_grp_per_tc_congest_prio_get_num_stats(struct mlx5e_priv *priv)
1645 struct mlx5_core_dev *mdev = priv->mdev;
1647 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1650 return NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS * NUM_PPORT_PRIO;
/* Same per-prio PPCNT loop as above, but for the congestion group. */
1653 static void mlx5e_grp_per_tc_congest_prio_update_stats(struct mlx5e_priv *priv)
1655 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1656 struct mlx5_core_dev *mdev = priv->mdev;
1657 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
1658 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1662 if (!MLX5_CAP_GEN(mdev, sbcam_reg))
1665 MLX5_SET(ppcnt_reg, in, pnat, 2);
1666 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP);
1667 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1668 out = pstats->per_tc_congest_prio_counters[prio];
1669 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1670 mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
/* Group totals/updates simply combine the two sub-groups. */
1674 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_port_buff_congest)
1676 return mlx5e_grp_per_tc_prio_get_num_stats(priv) +
1677 mlx5e_grp_per_tc_congest_prio_get_num_stats(priv);
1680 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_port_buff_congest)
1682 mlx5e_grp_per_tc_prio_update_stats(priv);
1683 mlx5e_grp_per_tc_congest_prio_update_stats(priv);
/* ---- per-priority counters group: per-prio traffic counters plus
 * per-prio/global PFC pause counters and pause-storm stall counters. ---- */
1686 #define PPORT_PER_PRIO_OFF(c) \
1687 MLX5_BYTE_OFF(ppcnt_reg, \
1688 counter_set.eth_per_prio_grp_data_layout.c##_high)
1689 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
1690 { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
1691 { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
1692 { "rx_prio%d_discards", PPORT_PER_PRIO_OFF(rx_discards) },
1693 { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
1694 { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
1697 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
/* Traffic sub-group: fixed size, all prios always reported. */
1699 static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
1701 return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
1704 static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
1710 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1711 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1712 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1713 pport_per_prio_traffic_stats_desc[i].format, prio);
1719 static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
1725 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1726 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
1728 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1729 pport_per_prio_traffic_stats_desc, i);
1735 static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
1736 /* %s is "global" or "prio{i}" */
1737 { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
1738 { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
1739 { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
1740 { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
1741 { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
1744 static const struct counter_desc pport_pfc_stall_stats_desc[] = {
1745 { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
1746 { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
1749 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
/* Stall counters count only when both pfcc_mask (PCAM) and stall_detect
 * (debug cap) are set - the two boolean caps multiply the array size. */
1750 #define NUM_PPORT_PFC_STALL_COUNTERS(priv) (ARRAY_SIZE(pport_pfc_stall_stats_desc) * \
1751 MLX5_CAP_PCAM_FEATURE((priv)->mdev, pfcc_mask) * \
1752 MLX5_CAP_DEBUG((priv)->mdev, stall_detect))
/* Bitmap of prios with PFC enabled in either direction; 0 for non-Ethernet
 * ports or on query failure. */
1754 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
1756 struct mlx5_core_dev *mdev = priv->mdev;
1761 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1764 err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
1766 return err ? 0 : pfc_en_tx | pfc_en_rx;
/* True when global (non-PFC) pause is enabled in either direction. */
1769 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
1771 struct mlx5_core_dev *mdev = priv->mdev;
1776 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
1779 err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
1781 return err ? false : rx_pause | tx_pause;
/* PFC sub-group size: one set of pause counters per PFC-enabled prio,
 * one "global" set if global pause is on, plus the stall counters. */
1784 static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
1786 return (mlx5e_query_global_pause_combined(priv) +
1787 hweight8(mlx5e_query_pfc_combined(priv))) *
1788 NUM_PPORT_PER_PRIO_PFC_COUNTERS +
1789 NUM_PPORT_PFC_STALL_COUNTERS(priv);
/* Names: "prio{N}" for each PFC-enabled prio, then "global" if global
 * pause is on, then the stall counter names. */
1792 static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
1796 unsigned long pfc_combined;
1799 pfc_combined = mlx5e_query_pfc_combined(priv);
1800 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1801 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1802 char pfc_string[ETH_GSTRING_LEN];
1804 snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
1805 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1806 pport_per_prio_pfc_stats_desc[i].format, pfc_string);
1810 if (mlx5e_query_global_pause_combined(priv)) {
1811 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1812 sprintf(data + (idx++) * ETH_GSTRING_LEN,
1813 pport_per_prio_pfc_stats_desc[i].format, "global");
1817 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1818 strcpy(data + (idx++) * ETH_GSTRING_LEN,
1819 pport_pfc_stall_stats_desc[i].format);
/* Values in the same order; global pause and stall counters are read from
 * per_prio_counters[0] (prio 0 carries the port-global fields). */
1824 static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
1828 unsigned long pfc_combined;
1831 pfc_combined = mlx5e_query_pfc_combined(priv);
1832 for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
1833 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1835 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
1836 pport_per_prio_pfc_stats_desc, i);
1840 if (mlx5e_query_global_pause_combined(priv)) {
1841 for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
1843 MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1844 pport_per_prio_pfc_stats_desc, i);
1848 for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
1849 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
1850 pport_pfc_stall_stats_desc, i);
/* Group ops combine traffic then PFC sub-groups, in that order. */
1855 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
1857 return mlx5e_grp_per_prio_traffic_get_num_stats() +
1858 mlx5e_grp_per_prio_pfc_get_num_stats(priv);
1861 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
1863 idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
1864 idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
1868 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
1870 idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
1871 idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
/* Refresh the cached per-prio counters: one PPCNT read per prio. */
1875 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
1877 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
1878 struct mlx5_core_dev *mdev = priv->mdev;
1879 u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
1880 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
1884 if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
1887 MLX5_SET(ppcnt_reg, in, local_port, 1);
1888 MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
1889 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
1890 out = pstats->per_prio_counters[prio];
1891 MLX5_SET(ppcnt_reg, in, prio_tc, prio);
1892 mlx5_core_access_reg(mdev, in, sz, out, sz,
1893 MLX5_REG_PPCNT, 0, 0);
1897 static const struct counter_desc mlx5e_pme_status_desc[] = {
1898 { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
1901 static const struct counter_desc mlx5e_pme_error_desc[] = {
1902 { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
1903 { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
1904 { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
/* Counts derived from the tables above. */
#define NUM_PME_STATUS_STATS		ARRAY_SIZE(mlx5e_pme_status_desc)
#define NUM_PME_ERR_STATS		ARRAY_SIZE(mlx5e_pme_error_desc)
1910 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(pme)
1912 return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
1915 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
1919 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1920 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
1922 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1923 strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
1928 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
1930 struct mlx5_pme_stats pme_stats;
1933 mlx5_get_pme_stats(priv->mdev, &pme_stats);
1935 for (i = 0; i < NUM_PME_STATUS_STATS; i++)
1936 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
1937 mlx5e_pme_status_desc, i);
1939 for (i = 0; i < NUM_PME_ERR_STATS; i++)
1940 data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
1941 mlx5e_pme_error_desc, i);
1946 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
1948 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
1950 return mlx5e_ktls_get_count(priv);
1953 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
1955 return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
1958 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
1960 return idx + mlx5e_ktls_get_stats(priv, data + idx);
1963 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
1965 static const struct counter_desc rq_stats_desc[] = {
1966 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1967 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1968 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
1969 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1970 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1971 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1972 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1973 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
1974 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
1975 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
1976 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
1977 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
1978 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
1979 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
1980 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
1981 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
1982 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
1983 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
1984 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
1985 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
1986 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
1987 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
1988 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
1989 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
1990 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
1991 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
1992 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
1993 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
1994 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
1995 #ifdef CONFIG_PAGE_POOL_STATS
1996 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) },
1997 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) },
1998 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) },
1999 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_empty) },
2000 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_refill) },
2001 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_waive) },
2002 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cached) },
2003 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_cache_full) },
2004 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) },
2005 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) },
2006 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) },
2008 #ifdef CONFIG_MLX5_EN_TLS
2009 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
2010 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
2011 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
2012 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
2013 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
2014 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
2015 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
2016 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_retry) },
2017 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
2018 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
2022 static const struct counter_desc sq_stats_desc[] = {
2023 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
2024 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
2025 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2026 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2027 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2028 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2029 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2030 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2031 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2032 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
2033 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2034 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2035 #ifdef CONFIG_MLX5_EN_TLS
2036 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2037 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2038 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2039 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2040 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2041 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2042 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2043 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2044 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2046 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2047 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
2048 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
2049 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2050 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) },
2051 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqes) },
2052 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
2053 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2056 static const struct counter_desc rq_xdpsq_stats_desc[] = {
2057 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2058 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2059 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2060 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2061 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2062 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2063 { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2066 static const struct counter_desc xdpsq_stats_desc[] = {
2067 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2068 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2069 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2070 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, nops) },
2071 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2072 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2073 { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2076 static const struct counter_desc xskrq_stats_desc[] = {
2077 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, packets) },
2078 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, bytes) },
2079 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2080 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2081 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2082 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) },
2083 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2084 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2085 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2086 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2087 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2088 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2089 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2090 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2091 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2092 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2093 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2094 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2095 { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, arfs_err) },
2098 static const struct counter_desc xsksq_stats_desc[] = {
2099 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
2100 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
2101 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
2102 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, full) },
2103 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, err) },
2104 { MLX5E_DECLARE_XSKSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
2107 static const struct counter_desc ch_stats_desc[] = {
2108 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) },
2109 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) },
2110 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, arm) },
2111 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, aff_change) },
2112 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, force_irq) },
2113 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2116 static const struct counter_desc ptp_sq_stats_desc[] = {
2117 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, packets) },
2118 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, bytes) },
2119 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2120 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2121 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2122 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, nop) },
2123 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2124 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, stopped) },
2125 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, dropped) },
2126 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2127 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, recover) },
2128 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqes) },
2129 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, wake) },
2130 { MLX5E_DECLARE_PTP_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
2133 static const struct counter_desc ptp_ch_stats_desc[] = {
2134 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, events) },
2135 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, poll) },
2136 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, arm) },
2137 { MLX5E_DECLARE_PTP_CH_STAT(struct mlx5e_ch_stats, eq_rearm) },
2140 static const struct counter_desc ptp_cq_stats_desc[] = {
2141 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, cqe) },
2142 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, err_cqe) },
2143 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
2144 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
2145 { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
2148 static const struct counter_desc ptp_rq_stats_desc[] = {
2149 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
2150 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
2151 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
2152 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
2153 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
2154 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
2155 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
2156 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
2157 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
2158 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
2159 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
2160 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
2161 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
2162 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
2163 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
2164 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
2165 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
2166 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
2167 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
2168 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
2169 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
2170 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
2171 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
2172 { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
2175 static const struct counter_desc qos_sq_stats_desc[] = {
2176 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
2177 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
2178 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
2179 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
2180 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
2181 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
2182 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
2183 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
2184 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
2185 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
2186 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
2187 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
2188 #ifdef CONFIG_MLX5_EN_TLS
2189 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
2190 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
2191 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
2192 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
2193 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
2194 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
2195 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
2196 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
2197 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
2199 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
2200 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
2201 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
2202 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
2203 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
2204 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
2205 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
2206 { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
/* Table sizes used by the per-channel/ptp/qos group callbacks below. */
#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS			ARRAY_SIZE(xdpsq_stats_desc)
#define NUM_RQ_XDPSQ_STATS		ARRAY_SIZE(rq_xdpsq_stats_desc)
#define NUM_XSKRQ_STATS			ARRAY_SIZE(xskrq_stats_desc)
#define NUM_XSKSQ_STATS			ARRAY_SIZE(xsksq_stats_desc)
#define NUM_CH_STATS			ARRAY_SIZE(ch_stats_desc)
#define NUM_PTP_SQ_STATS		ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS		ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS		ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS		ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS		ARRAY_SIZE(qos_sq_stats_desc)
2222 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
2224 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2225 return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
2228 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
2230 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2231 u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2234 for (qid = 0; qid < max_qos_sqs; qid++)
2235 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2236 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2237 qos_sq_stats_desc[i].format, qid);
2242 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
2244 struct mlx5e_sq_stats **stats;
2248 /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
2249 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
2250 stats = READ_ONCE(priv->htb_qos_sq_stats);
2252 for (qid = 0; qid < max_qos_sqs; qid++) {
2253 struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
2255 for (i = 0; i < NUM_QOS_SQ_STATS; i++)
2256 data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
2262 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
2264 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
2266 int num = NUM_PTP_CH_STATS;
2268 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2271 if (priv->tx_ptp_opened)
2272 num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
2273 if (priv->rx_ptp_opened)
2274 num += NUM_PTP_RQ_STATS;
2279 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
2283 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2286 for (i = 0; i < NUM_PTP_CH_STATS; i++)
2287 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2288 "%s", ptp_ch_stats_desc[i].format);
2290 if (priv->tx_ptp_opened) {
2291 for (tc = 0; tc < priv->max_opened_tc; tc++)
2292 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2293 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2294 ptp_sq_stats_desc[i].format, tc);
2296 for (tc = 0; tc < priv->max_opened_tc; tc++)
2297 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2298 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2299 ptp_cq_stats_desc[i].format, tc);
2301 if (priv->rx_ptp_opened) {
2302 for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2303 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2304 ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
2309 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
2313 if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
2316 for (i = 0; i < NUM_PTP_CH_STATS; i++)
2318 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
2319 ptp_ch_stats_desc, i);
2321 if (priv->tx_ptp_opened) {
2322 for (tc = 0; tc < priv->max_opened_tc; tc++)
2323 for (i = 0; i < NUM_PTP_SQ_STATS; i++)
2325 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
2326 ptp_sq_stats_desc, i);
2328 for (tc = 0; tc < priv->max_opened_tc; tc++)
2329 for (i = 0; i < NUM_PTP_CQ_STATS; i++)
2331 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
2332 ptp_cq_stats_desc, i);
2334 if (priv->rx_ptp_opened) {
2335 for (i = 0; i < NUM_PTP_RQ_STATS; i++)
2337 MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
2338 ptp_rq_stats_desc, i);
2343 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
2345 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(channels)
2347 int max_nch = priv->stats_nch;
2349 return (NUM_RQ_STATS * max_nch) +
2350 (NUM_CH_STATS * max_nch) +
2351 (NUM_SQ_STATS * max_nch * priv->max_opened_tc) +
2352 (NUM_RQ_XDPSQ_STATS * max_nch) +
2353 (NUM_XDPSQ_STATS * max_nch) +
2354 (NUM_XSKRQ_STATS * max_nch * priv->xsk.ever_used) +
2355 (NUM_XSKSQ_STATS * max_nch * priv->xsk.ever_used);
2358 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
2360 bool is_xsk = priv->xsk.ever_used;
2361 int max_nch = priv->stats_nch;
2364 for (i = 0; i < max_nch; i++)
2365 for (j = 0; j < NUM_CH_STATS; j++)
2366 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2367 ch_stats_desc[j].format, i);
2369 for (i = 0; i < max_nch; i++) {
2370 for (j = 0; j < NUM_RQ_STATS; j++)
2371 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2372 rq_stats_desc[j].format, i);
2373 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2374 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2375 xskrq_stats_desc[j].format, i);
2376 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2377 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2378 rq_xdpsq_stats_desc[j].format, i);
2381 for (tc = 0; tc < priv->max_opened_tc; tc++)
2382 for (i = 0; i < max_nch; i++)
2383 for (j = 0; j < NUM_SQ_STATS; j++)
2384 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2385 sq_stats_desc[j].format,
2388 for (i = 0; i < max_nch; i++) {
2389 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2390 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2391 xsksq_stats_desc[j].format, i);
2392 for (j = 0; j < NUM_XDPSQ_STATS; j++)
2393 sprintf(data + (idx++) * ETH_GSTRING_LEN,
2394 xdpsq_stats_desc[j].format, i);
2400 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
2402 bool is_xsk = priv->xsk.ever_used;
2403 int max_nch = priv->stats_nch;
2406 for (i = 0; i < max_nch; i++)
2407 for (j = 0; j < NUM_CH_STATS; j++)
2409 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
2412 for (i = 0; i < max_nch; i++) {
2413 for (j = 0; j < NUM_RQ_STATS; j++)
2415 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
2417 for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
2419 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
2420 xskrq_stats_desc, j);
2421 for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
2423 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
2424 rq_xdpsq_stats_desc, j);
2427 for (tc = 0; tc < priv->max_opened_tc; tc++)
2428 for (i = 0; i < max_nch; i++)
2429 for (j = 0; j < NUM_SQ_STATS; j++)
2431 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
2434 for (i = 0; i < max_nch; i++) {
2435 for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
2437 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
2438 xsksq_stats_desc, j);
2439 for (j = 0; j < NUM_XDPSQ_STATS; j++)
2441 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
2442 xdpsq_stats_desc, j);
2448 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
2450 MLX5E_DEFINE_STATS_GRP(sw, 0);
2451 MLX5E_DEFINE_STATS_GRP(qcnt, MLX5E_NDO_UPDATE_STATS);
2452 MLX5E_DEFINE_STATS_GRP(vnic_env, 0);
2453 MLX5E_DEFINE_STATS_GRP(vport, MLX5E_NDO_UPDATE_STATS);
2454 MLX5E_DEFINE_STATS_GRP(802_3, MLX5E_NDO_UPDATE_STATS);
2455 MLX5E_DEFINE_STATS_GRP(2863, 0);
2456 MLX5E_DEFINE_STATS_GRP(2819, 0);
2457 MLX5E_DEFINE_STATS_GRP(phy, 0);
2458 MLX5E_DEFINE_STATS_GRP(pcie, 0);
2459 MLX5E_DEFINE_STATS_GRP(per_prio, 0);
2460 MLX5E_DEFINE_STATS_GRP(pme, 0);
2461 MLX5E_DEFINE_STATS_GRP(channels, 0);
2462 MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
2463 MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
2464 static MLX5E_DEFINE_STATS_GRP(tls, 0);
2465 MLX5E_DEFINE_STATS_GRP(ptp, 0);
2466 static MLX5E_DEFINE_STATS_GRP(qos, 0);
2468 /* The stats groups order is opposite to the update_stats() order calls */
2469 mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
2470 &MLX5E_STATS_GRP(sw),
2471 &MLX5E_STATS_GRP(qcnt),
2472 &MLX5E_STATS_GRP(vnic_env),
2473 &MLX5E_STATS_GRP(vport),
2474 &MLX5E_STATS_GRP(802_3),
2475 &MLX5E_STATS_GRP(2863),
2476 &MLX5E_STATS_GRP(2819),
2477 &MLX5E_STATS_GRP(phy),
2478 &MLX5E_STATS_GRP(eth_ext),
2479 &MLX5E_STATS_GRP(pcie),
2480 &MLX5E_STATS_GRP(per_prio),
2481 &MLX5E_STATS_GRP(pme),
2482 #ifdef CONFIG_MLX5_EN_IPSEC
2483 &MLX5E_STATS_GRP(ipsec_hw),
2484 &MLX5E_STATS_GRP(ipsec_sw),
2486 &MLX5E_STATS_GRP(tls),
2487 &MLX5E_STATS_GRP(channels),
2488 &MLX5E_STATS_GRP(per_port_buff_congest),
2489 &MLX5E_STATS_GRP(ptp),
2490 &MLX5E_STATS_GRP(qos),
2491 #ifdef CONFIG_MLX5_EN_MACSEC
2492 &MLX5E_STATS_GRP(macsec_hw),
2496 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
2498 return ARRAY_SIZE(mlx5e_nic_stats_grps);