bnxt: Do not read past the end of test names
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c (platform/kernel/linux-rpi.git)
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2017 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/ctype.h>
12 #include <linux/stringify.h>
13 #include <linux/ethtool.h>
14 #include <linux/ethtool_netlink.h>
15 #include <linux/linkmode.h>
16 #include <linux/interrupt.h>
17 #include <linux/pci.h>
18 #include <linux/etherdevice.h>
19 #include <linux/crc32.h>
20 #include <linux/firmware.h>
21 #include <linux/utsname.h>
22 #include <linux/time.h>
23 #include <linux/ptp_clock_kernel.h>
24 #include <linux/net_tstamp.h>
25 #include <linux/timecounter.h>
26 #include <net/netlink.h>
27 #include "bnxt_hsi.h"
28 #include "bnxt.h"
29 #include "bnxt_hwrm.h"
30 #include "bnxt_ulp.h"
31 #include "bnxt_xdp.h"
32 #include "bnxt_ptp.h"
33 #include "bnxt_ethtool.h"
34 #include "bnxt_nvm_defs.h"      /* NVRAM content constant and structure defs */
35 #include "bnxt_fw_hdr.h"        /* Firmware hdr constant and structure defs */
36 #include "bnxt_coredump.h"
37
38 #define BNXT_NVM_ERR_MSG(dev, extack, msg)                      \
39         do {                                                    \
40                 if (extack)                                     \
41                         NL_SET_ERR_MSG_MOD(extack, msg);        \
42                 netdev_err(dev, "%s\n", msg);                   \
43         } while (0)
44
45 static u32 bnxt_get_msglevel(struct net_device *dev)
46 {
47         struct bnxt *bp = netdev_priv(dev);
48
49         return bp->msg_enable;
50 }
51
52 static void bnxt_set_msglevel(struct net_device *dev, u32 value)
53 {
54         struct bnxt *bp = netdev_priv(dev);
55
56         bp->msg_enable = value;
57 }
58
59 static int bnxt_get_coalesce(struct net_device *dev,
60                              struct ethtool_coalesce *coal,
61                              struct kernel_ethtool_coalesce *kernel_coal,
62                              struct netlink_ext_ack *extack)
63 {
64         struct bnxt *bp = netdev_priv(dev);
65         struct bnxt_coal *hw_coal;
66         u16 mult;
67
68         memset(coal, 0, sizeof(*coal));
69
70         coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;
71
72         hw_coal = &bp->rx_coal;
73         mult = hw_coal->bufs_per_record;
74         coal->rx_coalesce_usecs = hw_coal->coal_ticks;
75         coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
76         coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
77         coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
78         if (hw_coal->flags &
79             RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
80                 kernel_coal->use_cqe_mode_rx = true;
81
82         hw_coal = &bp->tx_coal;
83         mult = hw_coal->bufs_per_record;
84         coal->tx_coalesce_usecs = hw_coal->coal_ticks;
85         coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
86         coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
87         coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
88         if (hw_coal->flags &
89             RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
90                 kernel_coal->use_cqe_mode_tx = true;
91
92         coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
93
94         return 0;
95 }
96
97 static int bnxt_set_coalesce(struct net_device *dev,
98                              struct ethtool_coalesce *coal,
99                              struct kernel_ethtool_coalesce *kernel_coal,
100                              struct netlink_ext_ack *extack)
101 {
102         struct bnxt *bp = netdev_priv(dev);
103         bool update_stats = false;
104         struct bnxt_coal *hw_coal;
105         int rc = 0;
106         u16 mult;
107
108         if (coal->use_adaptive_rx_coalesce) {
109                 bp->flags |= BNXT_FLAG_DIM;
110         } else {
111                 if (bp->flags & BNXT_FLAG_DIM) {
112                         bp->flags &= ~(BNXT_FLAG_DIM);
113                         goto reset_coalesce;
114                 }
115         }
116
117         if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
118             !(bp->coal_cap.cmpl_params &
119               RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
120                 return -EOPNOTSUPP;
121
122         hw_coal = &bp->rx_coal;
123         mult = hw_coal->bufs_per_record;
124         hw_coal->coal_ticks = coal->rx_coalesce_usecs;
125         hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
126         hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
127         hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
128         hw_coal->flags &=
129                 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
130         if (kernel_coal->use_cqe_mode_rx)
131                 hw_coal->flags |=
132                         RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
133
134         hw_coal = &bp->tx_coal;
135         mult = hw_coal->bufs_per_record;
136         hw_coal->coal_ticks = coal->tx_coalesce_usecs;
137         hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
138         hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
139         hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
140         hw_coal->flags &=
141                 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
142         if (kernel_coal->use_cqe_mode_tx)
143                 hw_coal->flags |=
144                         RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
145
146         if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
147                 u32 stats_ticks = coal->stats_block_coalesce_usecs;
148
149                 /* Allow 0, which means disable. */
150                 if (stats_ticks)
151                         stats_ticks = clamp_t(u32, stats_ticks,
152                                               BNXT_MIN_STATS_COAL_TICKS,
153                                               BNXT_MAX_STATS_COAL_TICKS);
154                 stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
155                 bp->stats_coal_ticks = stats_ticks;
156                 if (bp->stats_coal_ticks)
157                         bp->current_interval =
158                                 bp->stats_coal_ticks * HZ / 1000000;
159                 else
160                         bp->current_interval = BNXT_TIMER_INTERVAL;
161                 update_stats = true;
162         }
163
164 reset_coalesce:
165         if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
166                 if (update_stats) {
167                         rc = bnxt_close_nic(bp, true, false);
168                         if (!rc)
169                                 rc = bnxt_open_nic(bp, true, false);
170                 } else {
171                         rc = bnxt_hwrm_set_coal(bp);
172                 }
173         }
174
175         return rc;
176 }
177
178 static const char * const bnxt_ring_rx_stats_str[] = {
179         "rx_ucast_packets",
180         "rx_mcast_packets",
181         "rx_bcast_packets",
182         "rx_discards",
183         "rx_errors",
184         "rx_ucast_bytes",
185         "rx_mcast_bytes",
186         "rx_bcast_bytes",
187 };
188
189 static const char * const bnxt_ring_tx_stats_str[] = {
190         "tx_ucast_packets",
191         "tx_mcast_packets",
192         "tx_bcast_packets",
193         "tx_errors",
194         "tx_discards",
195         "tx_ucast_bytes",
196         "tx_mcast_bytes",
197         "tx_bcast_bytes",
198 };
199
200 static const char * const bnxt_ring_tpa_stats_str[] = {
201         "tpa_packets",
202         "tpa_bytes",
203         "tpa_events",
204         "tpa_aborts",
205 };
206
207 static const char * const bnxt_ring_tpa2_stats_str[] = {
208         "rx_tpa_eligible_pkt",
209         "rx_tpa_eligible_bytes",
210         "rx_tpa_pkt",
211         "rx_tpa_bytes",
212         "rx_tpa_errors",
213         "rx_tpa_events",
214 };
215
216 static const char * const bnxt_rx_sw_stats_str[] = {
217         "rx_l4_csum_errors",
218         "rx_resets",
219         "rx_buf_errors",
220 };
221
222 static const char * const bnxt_cmn_sw_stats_str[] = {
223         "missed_irqs",
224 };
225
226 #define BNXT_RX_STATS_ENTRY(counter)    \
227         { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
228
229 #define BNXT_TX_STATS_ENTRY(counter)    \
230         { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
231
232 #define BNXT_RX_STATS_EXT_ENTRY(counter)        \
233         { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
234
235 #define BNXT_TX_STATS_EXT_ENTRY(counter)        \
236         { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
237
238 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n)                          \
239         BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),   \
240         BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
241
242 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n)                          \
243         BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),   \
244         BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
245
246 #define BNXT_RX_STATS_EXT_PFC_ENTRIES                           \
247         BNXT_RX_STATS_EXT_PFC_ENTRY(0),                         \
248         BNXT_RX_STATS_EXT_PFC_ENTRY(1),                         \
249         BNXT_RX_STATS_EXT_PFC_ENTRY(2),                         \
250         BNXT_RX_STATS_EXT_PFC_ENTRY(3),                         \
251         BNXT_RX_STATS_EXT_PFC_ENTRY(4),                         \
252         BNXT_RX_STATS_EXT_PFC_ENTRY(5),                         \
253         BNXT_RX_STATS_EXT_PFC_ENTRY(6),                         \
254         BNXT_RX_STATS_EXT_PFC_ENTRY(7)
255
256 #define BNXT_TX_STATS_EXT_PFC_ENTRIES                           \
257         BNXT_TX_STATS_EXT_PFC_ENTRY(0),                         \
258         BNXT_TX_STATS_EXT_PFC_ENTRY(1),                         \
259         BNXT_TX_STATS_EXT_PFC_ENTRY(2),                         \
260         BNXT_TX_STATS_EXT_PFC_ENTRY(3),                         \
261         BNXT_TX_STATS_EXT_PFC_ENTRY(4),                         \
262         BNXT_TX_STATS_EXT_PFC_ENTRY(5),                         \
263         BNXT_TX_STATS_EXT_PFC_ENTRY(6),                         \
264         BNXT_TX_STATS_EXT_PFC_ENTRY(7)
265
266 #define BNXT_RX_STATS_EXT_COS_ENTRY(n)                          \
267         BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),               \
268         BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
269
270 #define BNXT_TX_STATS_EXT_COS_ENTRY(n)                          \
271         BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),               \
272         BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
273
274 #define BNXT_RX_STATS_EXT_COS_ENTRIES                           \
275         BNXT_RX_STATS_EXT_COS_ENTRY(0),                         \
276         BNXT_RX_STATS_EXT_COS_ENTRY(1),                         \
277         BNXT_RX_STATS_EXT_COS_ENTRY(2),                         \
278         BNXT_RX_STATS_EXT_COS_ENTRY(3),                         \
279         BNXT_RX_STATS_EXT_COS_ENTRY(4),                         \
280         BNXT_RX_STATS_EXT_COS_ENTRY(5),                         \
281         BNXT_RX_STATS_EXT_COS_ENTRY(6),                         \
282         BNXT_RX_STATS_EXT_COS_ENTRY(7)                          \
283
284 #define BNXT_TX_STATS_EXT_COS_ENTRIES                           \
285         BNXT_TX_STATS_EXT_COS_ENTRY(0),                         \
286         BNXT_TX_STATS_EXT_COS_ENTRY(1),                         \
287         BNXT_TX_STATS_EXT_COS_ENTRY(2),                         \
288         BNXT_TX_STATS_EXT_COS_ENTRY(3),                         \
289         BNXT_TX_STATS_EXT_COS_ENTRY(4),                         \
290         BNXT_TX_STATS_EXT_COS_ENTRY(5),                         \
291         BNXT_TX_STATS_EXT_COS_ENTRY(6),                         \
292         BNXT_TX_STATS_EXT_COS_ENTRY(7)                          \
293
294 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)                  \
295         BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),       \
296         BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
297
298 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES                           \
299         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),                         \
300         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),                         \
301         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),                         \
302         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),                         \
303         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),                         \
304         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),                         \
305         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),                         \
306         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
307
308 #define BNXT_RX_STATS_PRI_ENTRY(counter, n)             \
309         { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),     \
310           __stringify(counter##_pri##n) }
311
312 #define BNXT_TX_STATS_PRI_ENTRY(counter, n)             \
313         { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),     \
314           __stringify(counter##_pri##n) }
315
316 #define BNXT_RX_STATS_PRI_ENTRIES(counter)              \
317         BNXT_RX_STATS_PRI_ENTRY(counter, 0),            \
318         BNXT_RX_STATS_PRI_ENTRY(counter, 1),            \
319         BNXT_RX_STATS_PRI_ENTRY(counter, 2),            \
320         BNXT_RX_STATS_PRI_ENTRY(counter, 3),            \
321         BNXT_RX_STATS_PRI_ENTRY(counter, 4),            \
322         BNXT_RX_STATS_PRI_ENTRY(counter, 5),            \
323         BNXT_RX_STATS_PRI_ENTRY(counter, 6),            \
324         BNXT_RX_STATS_PRI_ENTRY(counter, 7)
325
326 #define BNXT_TX_STATS_PRI_ENTRIES(counter)              \
327         BNXT_TX_STATS_PRI_ENTRY(counter, 0),            \
328         BNXT_TX_STATS_PRI_ENTRY(counter, 1),            \
329         BNXT_TX_STATS_PRI_ENTRY(counter, 2),            \
330         BNXT_TX_STATS_PRI_ENTRY(counter, 3),            \
331         BNXT_TX_STATS_PRI_ENTRY(counter, 4),            \
332         BNXT_TX_STATS_PRI_ENTRY(counter, 5),            \
333         BNXT_TX_STATS_PRI_ENTRY(counter, 6),            \
334         BNXT_TX_STATS_PRI_ENTRY(counter, 7)
335
336 enum {
337         RX_TOTAL_DISCARDS,
338         TX_TOTAL_DISCARDS,
339         RX_NETPOLL_DISCARDS,
340 };
341
342 static struct {
343         u64                     counter;
344         char                    string[ETH_GSTRING_LEN];
345 } bnxt_sw_func_stats[] = {
346         {0, "rx_total_discard_pkts"},
347         {0, "tx_total_discard_pkts"},
348         {0, "rx_total_netpoll_discards"},
349 };
350
351 #define NUM_RING_RX_SW_STATS            ARRAY_SIZE(bnxt_rx_sw_stats_str)
352 #define NUM_RING_CMN_SW_STATS           ARRAY_SIZE(bnxt_cmn_sw_stats_str)
353 #define NUM_RING_RX_HW_STATS            ARRAY_SIZE(bnxt_ring_rx_stats_str)
354 #define NUM_RING_TX_HW_STATS            ARRAY_SIZE(bnxt_ring_tx_stats_str)
355
356 static const struct {
357         long offset;
358         char string[ETH_GSTRING_LEN];
359 } bnxt_port_stats_arr[] = {
360         BNXT_RX_STATS_ENTRY(rx_64b_frames),
361         BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
362         BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
363         BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
364         BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
365         BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
366         BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
367         BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
368         BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
369         BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
370         BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
371         BNXT_RX_STATS_ENTRY(rx_total_frames),
372         BNXT_RX_STATS_ENTRY(rx_ucast_frames),
373         BNXT_RX_STATS_ENTRY(rx_mcast_frames),
374         BNXT_RX_STATS_ENTRY(rx_bcast_frames),
375         BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
376         BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
377         BNXT_RX_STATS_ENTRY(rx_pause_frames),
378         BNXT_RX_STATS_ENTRY(rx_pfc_frames),
379         BNXT_RX_STATS_ENTRY(rx_align_err_frames),
380         BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
381         BNXT_RX_STATS_ENTRY(rx_jbr_frames),
382         BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
383         BNXT_RX_STATS_ENTRY(rx_tagged_frames),
384         BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
385         BNXT_RX_STATS_ENTRY(rx_good_frames),
386         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
387         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
388         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
389         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
390         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
391         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
392         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
393         BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
394         BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
395         BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
396         BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
397         BNXT_RX_STATS_ENTRY(rx_bytes),
398         BNXT_RX_STATS_ENTRY(rx_runt_bytes),
399         BNXT_RX_STATS_ENTRY(rx_runt_frames),
400         BNXT_RX_STATS_ENTRY(rx_stat_discard),
401         BNXT_RX_STATS_ENTRY(rx_stat_err),
402
403         BNXT_TX_STATS_ENTRY(tx_64b_frames),
404         BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
405         BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
406         BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
407         BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
408         BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
409         BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
410         BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
411         BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
412         BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
413         BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
414         BNXT_TX_STATS_ENTRY(tx_good_frames),
415         BNXT_TX_STATS_ENTRY(tx_total_frames),
416         BNXT_TX_STATS_ENTRY(tx_ucast_frames),
417         BNXT_TX_STATS_ENTRY(tx_mcast_frames),
418         BNXT_TX_STATS_ENTRY(tx_bcast_frames),
419         BNXT_TX_STATS_ENTRY(tx_pause_frames),
420         BNXT_TX_STATS_ENTRY(tx_pfc_frames),
421         BNXT_TX_STATS_ENTRY(tx_jabber_frames),
422         BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
423         BNXT_TX_STATS_ENTRY(tx_err),
424         BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
425         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
426         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
427         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
428         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
429         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
430         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
431         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
432         BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
433         BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
434         BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
435         BNXT_TX_STATS_ENTRY(tx_total_collisions),
436         BNXT_TX_STATS_ENTRY(tx_bytes),
437         BNXT_TX_STATS_ENTRY(tx_xthol_frames),
438         BNXT_TX_STATS_ENTRY(tx_stat_discard),
439         BNXT_TX_STATS_ENTRY(tx_stat_error),
440 };
441
442 static const struct {
443         long offset;
444         char string[ETH_GSTRING_LEN];
445 } bnxt_port_stats_ext_arr[] = {
446         BNXT_RX_STATS_EXT_ENTRY(link_down_events),
447         BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
448         BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
449         BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
450         BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
451         BNXT_RX_STATS_EXT_COS_ENTRIES,
452         BNXT_RX_STATS_EXT_PFC_ENTRIES,
453         BNXT_RX_STATS_EXT_ENTRY(rx_bits),
454         BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
455         BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
456         BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
457         BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
458         BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
459         BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
460 };
461
462 static const struct {
463         long offset;
464         char string[ETH_GSTRING_LEN];
465 } bnxt_tx_port_stats_ext_arr[] = {
466         BNXT_TX_STATS_EXT_COS_ENTRIES,
467         BNXT_TX_STATS_EXT_PFC_ENTRIES,
468 };
469
470 static const struct {
471         long base_off;
472         char string[ETH_GSTRING_LEN];
473 } bnxt_rx_bytes_pri_arr[] = {
474         BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
475 };
476
477 static const struct {
478         long base_off;
479         char string[ETH_GSTRING_LEN];
480 } bnxt_rx_pkts_pri_arr[] = {
481         BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
482 };
483
484 static const struct {
485         long base_off;
486         char string[ETH_GSTRING_LEN];
487 } bnxt_tx_bytes_pri_arr[] = {
488         BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
489 };
490
491 static const struct {
492         long base_off;
493         char string[ETH_GSTRING_LEN];
494 } bnxt_tx_pkts_pri_arr[] = {
495         BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
496 };
497
498 #define BNXT_NUM_SW_FUNC_STATS  ARRAY_SIZE(bnxt_sw_func_stats)
499 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
500 #define BNXT_NUM_STATS_PRI                      \
501         (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +    \
502          ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +     \
503          ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +    \
504          ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
505
506 static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
507 {
508         if (BNXT_SUPPORTS_TPA(bp)) {
509                 if (bp->max_tpa_v2) {
510                         if (BNXT_CHIP_P5_THOR(bp))
511                                 return BNXT_NUM_TPA_RING_STATS_P5;
512                         return BNXT_NUM_TPA_RING_STATS_P5_SR2;
513                 }
514                 return BNXT_NUM_TPA_RING_STATS;
515         }
516         return 0;
517 }
518
519 static int bnxt_get_num_ring_stats(struct bnxt *bp)
520 {
521         int rx, tx, cmn;
522
523         rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
524              bnxt_get_num_tpa_ring_stats(bp);
525         tx = NUM_RING_TX_HW_STATS;
526         cmn = NUM_RING_CMN_SW_STATS;
527         return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
528                cmn * bp->cp_nr_rings;
529 }
530
531 static int bnxt_get_num_stats(struct bnxt *bp)
532 {
533         int num_stats = bnxt_get_num_ring_stats(bp);
534
535         num_stats += BNXT_NUM_SW_FUNC_STATS;
536
537         if (bp->flags & BNXT_FLAG_PORT_STATS)
538                 num_stats += BNXT_NUM_PORT_STATS;
539
540         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
541                 num_stats += bp->fw_rx_stats_ext_size +
542                              bp->fw_tx_stats_ext_size;
543                 if (bp->pri2cos_valid)
544                         num_stats += BNXT_NUM_STATS_PRI;
545         }
546
547         return num_stats;
548 }
549
550 static int bnxt_get_sset_count(struct net_device *dev, int sset)
551 {
552         struct bnxt *bp = netdev_priv(dev);
553
554         switch (sset) {
555         case ETH_SS_STATS:
556                 return bnxt_get_num_stats(bp);
557         case ETH_SS_TEST:
558                 if (!bp->num_tests)
559                         return -EOPNOTSUPP;
560                 return bp->num_tests;
561         default:
562                 return -EOPNOTSUPP;
563         }
564 }
565
566 static bool is_rx_ring(struct bnxt *bp, int ring_num)
567 {
568         return ring_num < bp->rx_nr_rings;
569 }
570
571 static bool is_tx_ring(struct bnxt *bp, int ring_num)
572 {
573         int tx_base = 0;
574
575         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
576                 tx_base = bp->rx_nr_rings;
577
578         if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings))
579                 return true;
580         return false;
581 }
582
583 static void bnxt_get_ethtool_stats(struct net_device *dev,
584                                    struct ethtool_stats *stats, u64 *buf)
585 {
586         u32 i, j = 0;
587         struct bnxt *bp = netdev_priv(dev);
588         u32 tpa_stats;
589
590         if (!bp->bnapi) {
591                 j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
592                 goto skip_ring_stats;
593         }
594
595         for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
596                 bnxt_sw_func_stats[i].counter = 0;
597
598         tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
599         for (i = 0; i < bp->cp_nr_rings; i++) {
600                 struct bnxt_napi *bnapi = bp->bnapi[i];
601                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
602                 u64 *sw_stats = cpr->stats.sw_stats;
603                 u64 *sw;
604                 int k;
605
606                 if (is_rx_ring(bp, i)) {
607                         for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++)
608                                 buf[j] = sw_stats[k];
609                 }
610                 if (is_tx_ring(bp, i)) {
611                         k = NUM_RING_RX_HW_STATS;
612                         for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
613                                j++, k++)
614                                 buf[j] = sw_stats[k];
615                 }
616                 if (!tpa_stats || !is_rx_ring(bp, i))
617                         goto skip_tpa_ring_stats;
618
619                 k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS;
620                 for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS +
621                            tpa_stats; j++, k++)
622                         buf[j] = sw_stats[k];
623
624 skip_tpa_ring_stats:
625                 sw = (u64 *)&cpr->sw_stats.rx;
626                 if (is_rx_ring(bp, i)) {
627                         for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
628                                 buf[j] = sw[k];
629                 }
630
631                 sw = (u64 *)&cpr->sw_stats.cmn;
632                 for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
633                         buf[j] = sw[k];
634
635                 bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
636                         BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
637                 bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
638                         BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
639                 bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter +=
640                         cpr->sw_stats.rx.rx_netpoll_discards;
641         }
642
643         for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
644                 buf[j] = bnxt_sw_func_stats[i].counter;
645
646 skip_ring_stats:
647         if (bp->flags & BNXT_FLAG_PORT_STATS) {
648                 u64 *port_stats = bp->port_stats.sw_stats;
649
650                 for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++)
651                         buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset);
652         }
653         if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
654                 u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
655                 u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
656
657                 for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
658                         buf[j] = *(rx_port_stats_ext +
659                                    bnxt_port_stats_ext_arr[i].offset);
660                 }
661                 for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
662                         buf[j] = *(tx_port_stats_ext +
663                                    bnxt_tx_port_stats_ext_arr[i].offset);
664                 }
665                 if (bp->pri2cos_valid) {
666                         for (i = 0; i < 8; i++, j++) {
667                                 long n = bnxt_rx_bytes_pri_arr[i].base_off +
668                                          bp->pri2cos_idx[i];
669
670                                 buf[j] = *(rx_port_stats_ext + n);
671                         }
672                         for (i = 0; i < 8; i++, j++) {
673                                 long n = bnxt_rx_pkts_pri_arr[i].base_off +
674                                          bp->pri2cos_idx[i];
675
676                                 buf[j] = *(rx_port_stats_ext + n);
677                         }
678                         for (i = 0; i < 8; i++, j++) {
679                                 long n = bnxt_tx_bytes_pri_arr[i].base_off +
680                                          bp->pri2cos_idx[i];
681
682                                 buf[j] = *(tx_port_stats_ext + n);
683                         }
684                         for (i = 0; i < 8; i++, j++) {
685                                 long n = bnxt_tx_pkts_pri_arr[i].base_off +
686                                          bp->pri2cos_idx[i];
687
688                                 buf[j] = *(tx_port_stats_ext + n);
689                         }
690                 }
691         }
692 }
693
694 static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
695 {
696         struct bnxt *bp = netdev_priv(dev);
697         static const char * const *str;
698         u32 i, j, num_str;
699
700         switch (stringset) {
701         case ETH_SS_STATS:
702                 for (i = 0; i < bp->cp_nr_rings; i++) {
703                         if (is_rx_ring(bp, i)) {
704                                 num_str = NUM_RING_RX_HW_STATS;
705                                 for (j = 0; j < num_str; j++) {
706                                         sprintf(buf, "[%d]: %s", i,
707                                                 bnxt_ring_rx_stats_str[j]);
708                                         buf += ETH_GSTRING_LEN;
709                                 }
710                         }
711                         if (is_tx_ring(bp, i)) {
712                                 num_str = NUM_RING_TX_HW_STATS;
713                                 for (j = 0; j < num_str; j++) {
714                                         sprintf(buf, "[%d]: %s", i,
715                                                 bnxt_ring_tx_stats_str[j]);
716                                         buf += ETH_GSTRING_LEN;
717                                 }
718                         }
719                         num_str = bnxt_get_num_tpa_ring_stats(bp);
720                         if (!num_str || !is_rx_ring(bp, i))
721                                 goto skip_tpa_stats;
722
723                         if (bp->max_tpa_v2)
724                                 str = bnxt_ring_tpa2_stats_str;
725                         else
726                                 str = bnxt_ring_tpa_stats_str;
727
728                         for (j = 0; j < num_str; j++) {
729                                 sprintf(buf, "[%d]: %s", i, str[j]);
730                                 buf += ETH_GSTRING_LEN;
731                         }
732 skip_tpa_stats:
733                         if (is_rx_ring(bp, i)) {
734                                 num_str = NUM_RING_RX_SW_STATS;
735                                 for (j = 0; j < num_str; j++) {
736                                         sprintf(buf, "[%d]: %s", i,
737                                                 bnxt_rx_sw_stats_str[j]);
738                                         buf += ETH_GSTRING_LEN;
739                                 }
740                         }
741                         num_str = NUM_RING_CMN_SW_STATS;
742                         for (j = 0; j < num_str; j++) {
743                                 sprintf(buf, "[%d]: %s", i,
744                                         bnxt_cmn_sw_stats_str[j]);
745                                 buf += ETH_GSTRING_LEN;
746                         }
747                 }
748                 for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
749                         strcpy(buf, bnxt_sw_func_stats[i].string);
750                         buf += ETH_GSTRING_LEN;
751                 }
752
753                 if (bp->flags & BNXT_FLAG_PORT_STATS) {
754                         for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
755                                 strcpy(buf, bnxt_port_stats_arr[i].string);
756                                 buf += ETH_GSTRING_LEN;
757                         }
758                 }
759                 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
760                         for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
761                                 strcpy(buf, bnxt_port_stats_ext_arr[i].string);
762                                 buf += ETH_GSTRING_LEN;
763                         }
764                         for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
765                                 strcpy(buf,
766                                        bnxt_tx_port_stats_ext_arr[i].string);
767                                 buf += ETH_GSTRING_LEN;
768                         }
769                         if (bp->pri2cos_valid) {
770                                 for (i = 0; i < 8; i++) {
771                                         strcpy(buf,
772                                                bnxt_rx_bytes_pri_arr[i].string);
773                                         buf += ETH_GSTRING_LEN;
774                                 }
775                                 for (i = 0; i < 8; i++) {
776                                         strcpy(buf,
777                                                bnxt_rx_pkts_pri_arr[i].string);
778                                         buf += ETH_GSTRING_LEN;
779                                 }
780                                 for (i = 0; i < 8; i++) {
781                                         strcpy(buf,
782                                                bnxt_tx_bytes_pri_arr[i].string);
783                                         buf += ETH_GSTRING_LEN;
784                                 }
785                                 for (i = 0; i < 8; i++) {
786                                         strcpy(buf,
787                                                bnxt_tx_pkts_pri_arr[i].string);
788                                         buf += ETH_GSTRING_LEN;
789                                 }
790                         }
791                 }
792                 break;
793         case ETH_SS_TEST:
794                 if (bp->num_tests)
795                         memcpy(buf, bp->test_info->string,
796                                bp->num_tests * ETH_GSTRING_LEN);
797                 break;
798         default:
799                 netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
800                            stringset);
801                 break;
802         }
803 }
804
805 static void bnxt_get_ringparam(struct net_device *dev,
806                                struct ethtool_ringparam *ering,
807                                struct kernel_ethtool_ringparam *kernel_ering,
808                                struct netlink_ext_ack *extack)
809 {
810         struct bnxt *bp = netdev_priv(dev);
811
812         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
813                 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
814                 ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
815                 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
816         } else {
817                 ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
818                 ering->rx_jumbo_max_pending = 0;
819                 kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
820         }
821         ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
822
823         ering->rx_pending = bp->rx_ring_size;
824         ering->rx_jumbo_pending = bp->rx_agg_ring_size;
825         ering->tx_pending = bp->tx_ring_size;
826 }
827
828 static int bnxt_set_ringparam(struct net_device *dev,
829                               struct ethtool_ringparam *ering,
830                               struct kernel_ethtool_ringparam *kernel_ering,
831                               struct netlink_ext_ack *extack)
832 {
833         struct bnxt *bp = netdev_priv(dev);
834
835         if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
836             (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
837             (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
838                 return -EINVAL;
839
840         if (netif_running(dev))
841                 bnxt_close_nic(bp, false, false);
842
843         bp->rx_ring_size = ering->rx_pending;
844         bp->tx_ring_size = ering->tx_pending;
845         bnxt_set_ring_params(bp);
846
847         if (netif_running(dev))
848                 return bnxt_open_nic(bp, false, false);
849
850         return 0;
851 }
852
853 static void bnxt_get_channels(struct net_device *dev,
854                               struct ethtool_channels *channel)
855 {
856         struct bnxt *bp = netdev_priv(dev);
857         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
858         int max_rx_rings, max_tx_rings, tcs;
859         int max_tx_sch_inputs, tx_grps;
860
861         /* Get the most up-to-date max_tx_sch_inputs. */
862         if (netif_running(dev) && BNXT_NEW_RM(bp))
863                 bnxt_hwrm_func_resc_qcaps(bp, false);
864         max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
865
866         bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
867         if (max_tx_sch_inputs)
868                 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
869
870         tcs = netdev_get_num_tc(dev);
871         tx_grps = max(tcs, 1);
872         if (bp->tx_nr_rings_xdp)
873                 tx_grps++;
874         max_tx_rings /= tx_grps;
875         channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
876
877         if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
878                 max_rx_rings = 0;
879                 max_tx_rings = 0;
880         }
881         if (max_tx_sch_inputs)
882                 max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
883
884         if (tcs > 1)
885                 max_tx_rings /= tcs;
886
887         channel->max_rx = max_rx_rings;
888         channel->max_tx = max_tx_rings;
889         channel->max_other = 0;
890         if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
891                 channel->combined_count = bp->rx_nr_rings;
892                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
893                         channel->combined_count--;
894         } else {
895                 if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
896                         channel->rx_count = bp->rx_nr_rings;
897                         channel->tx_count = bp->tx_nr_rings_per_tc;
898                 }
899         }
900 }
901
902 static int bnxt_set_channels(struct net_device *dev,
903                              struct ethtool_channels *channel)
904 {
905         struct bnxt *bp = netdev_priv(dev);
906         int req_tx_rings, req_rx_rings, tcs;
907         bool sh = false;
908         int tx_xdp = 0;
909         int rc = 0;
910
911         if (channel->other_count)
912                 return -EINVAL;
913
914         if (!channel->combined_count &&
915             (!channel->rx_count || !channel->tx_count))
916                 return -EINVAL;
917
918         if (channel->combined_count &&
919             (channel->rx_count || channel->tx_count))
920                 return -EINVAL;
921
922         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
923                                             channel->tx_count))
924                 return -EINVAL;
925
926         if (channel->combined_count)
927                 sh = true;
928
929         tcs = netdev_get_num_tc(dev);
930
931         req_tx_rings = sh ? channel->combined_count : channel->tx_count;
932         req_rx_rings = sh ? channel->combined_count : channel->rx_count;
933         if (bp->tx_nr_rings_xdp) {
934                 if (!sh) {
935                         netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
936                         return -EINVAL;
937                 }
938                 tx_xdp = req_rx_rings;
939         }
940         rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
941         if (rc) {
942                 netdev_warn(dev, "Unable to allocate the requested rings\n");
943                 return rc;
944         }
945
946         if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
947             bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
948             netif_is_rxfh_configured(dev)) {
949                 netdev_warn(dev, "RSS table size change required, RSS table entries must be reset to default to proceed\n");
950                 return -EINVAL;
951         }
952
953         if (netif_running(dev)) {
954                 if (BNXT_PF(bp)) {
955                         /* TODO CHIMP_FW: Send message to all VFs
956                          * before PF unload
957                          */
958                 }
959                 rc = bnxt_close_nic(bp, true, false);
960                 if (rc) {
961                         netdev_err(bp->dev, "Set channel failure rc: %x\n",
962                                    rc);
963                         return rc;
964                 }
965         }
966
967         if (sh) {
968                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
969                 bp->rx_nr_rings = channel->combined_count;
970                 bp->tx_nr_rings_per_tc = channel->combined_count;
971         } else {
972                 bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
973                 bp->rx_nr_rings = channel->rx_count;
974                 bp->tx_nr_rings_per_tc = channel->tx_count;
975         }
976         bp->tx_nr_rings_xdp = tx_xdp;
977         bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
978         if (tcs > 1)
979                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
980
981         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
982                                bp->tx_nr_rings + bp->rx_nr_rings;
983
984         /* After changing number of rx channels, update NTUPLE feature. */
985         netdev_update_features(dev);
986         if (netif_running(dev)) {
987                 rc = bnxt_open_nic(bp, true, false);
988                 if ((!rc) && BNXT_PF(bp)) {
989                         /* TODO CHIMP_FW: Send message to all VFs
990                          * to re-enable
991                          */
992                 }
993         } else {
994                 rc = bnxt_reserve_rings(bp, true);
995         }
996
997         return rc;
998 }
999
1000 #ifdef CONFIG_RFS_ACCEL
1001 static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
1002                             u32 *rule_locs)
1003 {
1004         int i, j = 0;
1005
1006         cmd->data = bp->ntp_fltr_count;
1007         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
1008                 struct hlist_head *head;
1009                 struct bnxt_ntuple_filter *fltr;
1010
1011                 head = &bp->ntp_fltr_hash_tbl[i];
1012                 rcu_read_lock();
1013                 hlist_for_each_entry_rcu(fltr, head, hash) {
1014                         if (j == cmd->rule_cnt)
1015                                 break;
1016                         rule_locs[j++] = fltr->sw_id;
1017                 }
1018                 rcu_read_unlock();
1019                 if (j == cmd->rule_cnt)
1020                         break;
1021         }
1022         cmd->rule_cnt = j;
1023         return 0;
1024 }
1025
1026 static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1027 {
1028         struct ethtool_rx_flow_spec *fs =
1029                 (struct ethtool_rx_flow_spec *)&cmd->fs;
1030         struct bnxt_ntuple_filter *fltr;
1031         struct flow_keys *fkeys;
1032         int i, rc = -EINVAL;
1033
1034         if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
1035                 return rc;
1036
1037         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
1038                 struct hlist_head *head;
1039
1040                 head = &bp->ntp_fltr_hash_tbl[i];
1041                 rcu_read_lock();
1042                 hlist_for_each_entry_rcu(fltr, head, hash) {
1043                         if (fltr->sw_id == fs->location)
1044                                 goto fltr_found;
1045                 }
1046                 rcu_read_unlock();
1047         }
1048         return rc;
1049
1050 fltr_found:
1051         fkeys = &fltr->fkeys;
1052         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
1053                 if (fkeys->basic.ip_proto == IPPROTO_TCP)
1054                         fs->flow_type = TCP_V4_FLOW;
1055                 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
1056                         fs->flow_type = UDP_V4_FLOW;
1057                 else
1058                         goto fltr_err;
1059
1060                 fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
1061                 fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
1062
1063                 fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
1064                 fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
1065
1066                 fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
1067                 fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
1068
1069                 fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
1070                 fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
1071         } else {
1072                 int i;
1073
1074                 if (fkeys->basic.ip_proto == IPPROTO_TCP)
1075                         fs->flow_type = TCP_V6_FLOW;
1076                 else if (fkeys->basic.ip_proto == IPPROTO_UDP)
1077                         fs->flow_type = UDP_V6_FLOW;
1078                 else
1079                         goto fltr_err;
1080
1081                 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
1082                         fkeys->addrs.v6addrs.src;
1083                 *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
1084                         fkeys->addrs.v6addrs.dst;
1085                 for (i = 0; i < 4; i++) {
1086                         fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
1087                         fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
1088                 }
1089                 fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
1090                 fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
1091
1092                 fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
1093                 fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
1094         }
1095
1096         fs->ring_cookie = fltr->rxq;
1097         rc = 0;
1098
1099 fltr_err:
1100         rcu_read_unlock();
1101
1102         return rc;
1103 }
1104 #endif
1105
1106 static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
1107 {
1108         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
1109                 return RXH_IP_SRC | RXH_IP_DST;
1110         return 0;
1111 }
1112
1113 static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
1114 {
1115         if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
1116                 return RXH_IP_SRC | RXH_IP_DST;
1117         return 0;
1118 }
1119
1120 static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1121 {
1122         cmd->data = 0;
1123         switch (cmd->flow_type) {
1124         case TCP_V4_FLOW:
1125                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
1126                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1127                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1128                 cmd->data |= get_ethtool_ipv4_rss(bp);
1129                 break;
1130         case UDP_V4_FLOW:
1131                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
1132                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1133                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1134                 fallthrough;
1135         case SCTP_V4_FLOW:
1136         case AH_ESP_V4_FLOW:
1137         case AH_V4_FLOW:
1138         case ESP_V4_FLOW:
1139         case IPV4_FLOW:
1140                 cmd->data |= get_ethtool_ipv4_rss(bp);
1141                 break;
1142
1143         case TCP_V6_FLOW:
1144                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
1145                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1146                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1147                 cmd->data |= get_ethtool_ipv6_rss(bp);
1148                 break;
1149         case UDP_V6_FLOW:
1150                 if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
1151                         cmd->data |= RXH_IP_SRC | RXH_IP_DST |
1152                                      RXH_L4_B_0_1 | RXH_L4_B_2_3;
1153                 fallthrough;
1154         case SCTP_V6_FLOW:
1155         case AH_ESP_V6_FLOW:
1156         case AH_V6_FLOW:
1157         case ESP_V6_FLOW:
1158         case IPV6_FLOW:
1159                 cmd->data |= get_ethtool_ipv6_rss(bp);
1160                 break;
1161         }
1162         return 0;
1163 }
1164
1165 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1166 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1167
1168 static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
1169 {
1170         u32 rss_hash_cfg = bp->rss_hash_cfg;
1171         int tuple, rc = 0;
1172
1173         if (cmd->data == RXH_4TUPLE)
1174                 tuple = 4;
1175         else if (cmd->data == RXH_2TUPLE)
1176                 tuple = 2;
1177         else if (!cmd->data)
1178                 tuple = 0;
1179         else
1180                 return -EINVAL;
1181
1182         if (cmd->flow_type == TCP_V4_FLOW) {
1183                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1184                 if (tuple == 4)
1185                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
1186         } else if (cmd->flow_type == UDP_V4_FLOW) {
1187                 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
1188                         return -EINVAL;
1189                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1190                 if (tuple == 4)
1191                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
1192         } else if (cmd->flow_type == TCP_V6_FLOW) {
1193                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1194                 if (tuple == 4)
1195                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
1196         } else if (cmd->flow_type == UDP_V6_FLOW) {
1197                 if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
1198                         return -EINVAL;
1199                 rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1200                 if (tuple == 4)
1201                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
1202         } else if (tuple == 4) {
1203                 return -EINVAL;
1204         }
1205
1206         switch (cmd->flow_type) {
1207         case TCP_V4_FLOW:
1208         case UDP_V4_FLOW:
1209         case SCTP_V4_FLOW:
1210         case AH_ESP_V4_FLOW:
1211         case AH_V4_FLOW:
1212         case ESP_V4_FLOW:
1213         case IPV4_FLOW:
1214                 if (tuple == 2)
1215                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1216                 else if (!tuple)
1217                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
1218                 break;
1219
1220         case TCP_V6_FLOW:
1221         case UDP_V6_FLOW:
1222         case SCTP_V6_FLOW:
1223         case AH_ESP_V6_FLOW:
1224         case AH_V6_FLOW:
1225         case ESP_V6_FLOW:
1226         case IPV6_FLOW:
1227                 if (tuple == 2)
1228                         rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1229                 else if (!tuple)
1230                         rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
1231                 break;
1232         }
1233
1234         if (bp->rss_hash_cfg == rss_hash_cfg)
1235                 return 0;
1236
1237         if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
1238                 bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
1239         bp->rss_hash_cfg = rss_hash_cfg;
1240         if (netif_running(bp->dev)) {
1241                 bnxt_close_nic(bp, false, false);
1242                 rc = bnxt_open_nic(bp, false, false);
1243         }
1244         return rc;
1245 }
1246
1247 static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1248                           u32 *rule_locs)
1249 {
1250         struct bnxt *bp = netdev_priv(dev);
1251         int rc = 0;
1252
1253         switch (cmd->cmd) {
1254 #ifdef CONFIG_RFS_ACCEL
1255         case ETHTOOL_GRXRINGS:
1256                 cmd->data = bp->rx_nr_rings;
1257                 break;
1258
1259         case ETHTOOL_GRXCLSRLCNT:
1260                 cmd->rule_cnt = bp->ntp_fltr_count;
1261                 cmd->data = BNXT_NTP_FLTR_MAX_FLTR;
1262                 break;
1263
1264         case ETHTOOL_GRXCLSRLALL:
1265                 rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs);
1266                 break;
1267
1268         case ETHTOOL_GRXCLSRULE:
1269                 rc = bnxt_grxclsrule(bp, cmd);
1270                 break;
1271 #endif
1272
1273         case ETHTOOL_GRXFH:
1274                 rc = bnxt_grxfh(bp, cmd);
1275                 break;
1276
1277         default:
1278                 rc = -EOPNOTSUPP;
1279                 break;
1280         }
1281
1282         return rc;
1283 }
1284
1285 static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1286 {
1287         struct bnxt *bp = netdev_priv(dev);
1288         int rc;
1289
1290         switch (cmd->cmd) {
1291         case ETHTOOL_SRXFH:
1292                 rc = bnxt_srxfh(bp, cmd);
1293                 break;
1294
1295         default:
1296                 rc = -EOPNOTSUPP;
1297                 break;
1298         }
1299         return rc;
1300 }
1301
1302 u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
1303 {
1304         struct bnxt *bp = netdev_priv(dev);
1305
1306         if (bp->flags & BNXT_FLAG_CHIP_P5)
1307                 return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
1308         return HW_HASH_INDEX_SIZE;
1309 }
1310
1311 static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
1312 {
1313         return HW_HASH_KEY_SIZE;
1314 }
1315
1316 static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1317                          u8 *hfunc)
1318 {
1319         struct bnxt *bp = netdev_priv(dev);
1320         struct bnxt_vnic_info *vnic;
1321         u32 i, tbl_size;
1322
1323         if (hfunc)
1324                 *hfunc = ETH_RSS_HASH_TOP;
1325
1326         if (!bp->vnic_info)
1327                 return 0;
1328
1329         vnic = &bp->vnic_info[0];
1330         if (indir && bp->rss_indir_tbl) {
1331                 tbl_size = bnxt_get_rxfh_indir_size(dev);
1332                 for (i = 0; i < tbl_size; i++)
1333                         indir[i] = bp->rss_indir_tbl[i];
1334         }
1335
1336         if (key && vnic->rss_hash_key)
1337                 memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
1338
1339         return 0;
1340 }
1341
1342 static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
1343                          const u8 *key, const u8 hfunc)
1344 {
1345         struct bnxt *bp = netdev_priv(dev);
1346         int rc = 0;
1347
1348         if (hfunc && hfunc != ETH_RSS_HASH_TOP)
1349                 return -EOPNOTSUPP;
1350
1351         if (key)
1352                 return -EOPNOTSUPP;
1353
1354         if (indir) {
1355                 u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
1356
1357                 for (i = 0; i < tbl_size; i++)
1358                         bp->rss_indir_tbl[i] = indir[i];
1359                 pad = bp->rss_indir_tbl_entries - tbl_size;
1360                 if (pad)
1361                         memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
1362         }
1363
1364         if (netif_running(bp->dev)) {
1365                 bnxt_close_nic(bp, false, false);
1366                 rc = bnxt_open_nic(bp, false, false);
1367         }
1368         return rc;
1369 }
1370
1371 static void bnxt_get_drvinfo(struct net_device *dev,
1372                              struct ethtool_drvinfo *info)
1373 {
1374         struct bnxt *bp = netdev_priv(dev);
1375
1376         strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1377         strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
1378         strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
1379         info->n_stats = bnxt_get_num_stats(bp);
1380         info->testinfo_len = bp->num_tests;
1381         /* TODO CHIMP_FW: eeprom dump details */
1382         info->eedump_len = 0;
1383         /* TODO CHIMP FW: reg dump details */
1384         info->regdump_len = 0;
1385 }
1386
1387 static int bnxt_get_regs_len(struct net_device *dev)
1388 {
1389         struct bnxt *bp = netdev_priv(dev);
1390         int reg_len;
1391
1392         if (!BNXT_PF(bp))
1393                 return -EOPNOTSUPP;
1394
1395         reg_len = BNXT_PXP_REG_LEN;
1396
1397         if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)
1398                 reg_len += sizeof(struct pcie_ctx_hw_stats);
1399
1400         return reg_len;
1401 }
1402
1403 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1404                           void *_p)
1405 {
1406         struct pcie_ctx_hw_stats *hw_pcie_stats;
1407         struct hwrm_pcie_qstats_input *req;
1408         struct bnxt *bp = netdev_priv(dev);
1409         dma_addr_t hw_pcie_stats_addr;
1410         int rc;
1411
1412         regs->version = 0;
1413         bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p);
1414
1415         if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
1416                 return;
1417
1418         if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS))
1419                 return;
1420
1421         hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats),
1422                                            &hw_pcie_stats_addr);
1423         if (!hw_pcie_stats) {
1424                 hwrm_req_drop(bp, req);
1425                 return;
1426         }
1427
1428         regs->version = 1;
1429         hwrm_req_hold(bp, req); /* hold on to slice */
1430         req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats));
1431         req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
1432         rc = hwrm_req_send(bp, req);
1433         if (!rc) {
1434                 __le64 *src = (__le64 *)hw_pcie_stats;
1435                 u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
1436                 int i;
1437
1438                 for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
1439                         dst[i] = le64_to_cpu(src[i]);
1440         }
1441         hwrm_req_drop(bp, req);
1442 }
1443
1444 static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1445 {
1446         struct bnxt *bp = netdev_priv(dev);
1447
1448         wol->supported = 0;
1449         wol->wolopts = 0;
1450         memset(&wol->sopass, 0, sizeof(wol->sopass));
1451         if (bp->flags & BNXT_FLAG_WOL_CAP) {
1452                 wol->supported = WAKE_MAGIC;
1453                 if (bp->wol)
1454                         wol->wolopts = WAKE_MAGIC;
1455         }
1456 }
1457
1458 static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1459 {
1460         struct bnxt *bp = netdev_priv(dev);
1461
1462         if (wol->wolopts & ~WAKE_MAGIC)
1463                 return -EINVAL;
1464
1465         if (wol->wolopts & WAKE_MAGIC) {
1466                 if (!(bp->flags & BNXT_FLAG_WOL_CAP))
1467                         return -EINVAL;
1468                 if (!bp->wol) {
1469                         if (bnxt_hwrm_alloc_wol_fltr(bp))
1470                                 return -EBUSY;
1471                         bp->wol = 1;
1472                 }
1473         } else {
1474                 if (bp->wol) {
1475                         if (bnxt_hwrm_free_wol_fltr(bp))
1476                                 return -EBUSY;
1477                         bp->wol = 0;
1478                 }
1479         }
1480         return 0;
1481 }
1482
1483 u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
1484 {
1485         u32 speed_mask = 0;
1486
1487         /* TODO: support 25GB, 40GB, 50GB with different cable type */
1488         /* set the advertised speeds */
1489         if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
1490                 speed_mask |= ADVERTISED_100baseT_Full;
1491         if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
1492                 speed_mask |= ADVERTISED_1000baseT_Full;
1493         if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
1494                 speed_mask |= ADVERTISED_2500baseX_Full;
1495         if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
1496                 speed_mask |= ADVERTISED_10000baseT_Full;
1497         if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
1498                 speed_mask |= ADVERTISED_40000baseCR4_Full;
1499
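             /* Standard ethtool pause advertisement encoding: symmetric
              * pause sets Pause, TX-only sets Asym_Pause, and RX-only
              * sets both Pause and Asym_Pause.
              */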
1500         if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
1501                 speed_mask |= ADVERTISED_Pause;
1502         else if (fw_pause & BNXT_LINK_PAUSE_TX)
1503                 speed_mask |= ADVERTISED_Asym_Pause;
1504         else if (fw_pause & BNXT_LINK_PAUSE_RX)
1505                 speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1506
1507         return speed_mask;
1508 }
1509
1510 #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
1511 {                                                                       \
1512         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)                    \
1513                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1514                                                      100baseT_Full);    \
1515         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)                      \
1516                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1517                                                      1000baseT_Full);   \
1518         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)                     \
1519                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1520                                                      10000baseT_Full);  \
1521         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)                     \
1522                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1523                                                      25000baseCR_Full); \
1524         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)                     \
1525                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1526                                                      40000baseCR4_Full);\
1527         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)                     \
1528                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1529                                                      50000baseCR2_Full);\
1530         if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)                    \
1531                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1532                                                      100000baseCR4_Full);\
1533         if ((fw_pause) & BNXT_LINK_PAUSE_RX) {                          \
1534                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1535                                                      Pause);            \
1536                 if (!((fw_pause) & BNXT_LINK_PAUSE_TX))                 \
1537                         ethtool_link_ksettings_add_link_mode(           \
1538                                         lk_ksettings, name, Asym_Pause);\
1539         } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {                   \
1540                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1541                                                      Asym_Pause);       \
1542         }                                                               \
1543 }
1544
1545 #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)          \
1546 {                                                                       \
1547         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1548                                                   100baseT_Full) ||     \
1549             ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1550                                                   100baseT_Half))       \
1551                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;               \
1552         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1553                                                   1000baseT_Full) ||    \
1554             ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1555                                                   1000baseT_Half))      \
1556                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;                 \
1557         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1558                                                   10000baseT_Full))     \
1559                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;                \
1560         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1561                                                   25000baseCR_Full))    \
1562                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;                \
1563         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1564                                                   40000baseCR4_Full))   \
1565                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;                \
1566         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1567                                                   50000baseCR2_Full))   \
1568                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;                \
1569         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1570                                                   100000baseCR4_Full))  \
1571                 (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;               \
1572 }
1573
1574 #define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name)     \
1575 {                                                                       \
1576         if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB)                \
1577                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1578                                                      50000baseCR_Full); \
1579         if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB)               \
1580                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1581                                                      100000baseCR2_Full);\
1582         if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB)               \
1583                 ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
1584                                                      200000baseCR4_Full);\
1585 }
1586
1587 #define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name)     \
1588 {                                                                       \
1589         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1590                                                   50000baseCR_Full))    \
1591                 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB;           \
1592         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1593                                                   100000baseCR2_Full))  \
1594                 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB;          \
1595         if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,   \
1596                                                   200000baseCR4_Full))  \
1597                 (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB;          \
1598 }
1599
1600 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
1601                                 struct ethtool_link_ksettings *lk_ksettings)
1602 {
1603         u16 fec_cfg = link_info->fec_cfg;
1604
1605         if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
1606                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1607                                  lk_ksettings->link_modes.advertising);
1608                 return;
1609         }
1610         if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1611                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1612                                  lk_ksettings->link_modes.advertising);
1613         if (fec_cfg & BNXT_FEC_ENC_RS)
1614                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1615                                  lk_ksettings->link_modes.advertising);
1616         if (fec_cfg & BNXT_FEC_ENC_LLRS)
1617                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1618                                  lk_ksettings->link_modes.advertising);
1619 }
1620
1621 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
1622                                 struct ethtool_link_ksettings *lk_ksettings)
1623 {
1624         u16 fw_speeds = link_info->advertising;
1625         u8 fw_pause = 0;
1626
1627         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1628                 fw_pause = link_info->auto_pause_setting;
1629
1630         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
1631         fw_speeds = link_info->advertising_pam4;
1632         BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
1633         bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
1634 }
1635
1636 static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
1637                                 struct ethtool_link_ksettings *lk_ksettings)
1638 {
1639         u16 fw_speeds = link_info->lp_auto_link_speeds;
1640         u8 fw_pause = 0;
1641
1642         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
1643                 fw_pause = link_info->lp_pause;
1644
1645         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
1646                                 lp_advertising);
1647         fw_speeds = link_info->lp_auto_pam4_link_speeds;
1648         BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
1649 }
1650
1651 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
1652                                 struct ethtool_link_ksettings *lk_ksettings)
1653 {
1654         u16 fec_cfg = link_info->fec_cfg;
1655
1656         if (fec_cfg & BNXT_FEC_NONE) {
1657                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
1658                                  lk_ksettings->link_modes.supported);
1659                 return;
1660         }
1661         if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
1662                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1663                                  lk_ksettings->link_modes.supported);
1664         if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
1665                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1666                                  lk_ksettings->link_modes.supported);
1667         if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
1668                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
1669                                  lk_ksettings->link_modes.supported);
1670 }
1671
1672 static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
1673                                 struct ethtool_link_ksettings *lk_ksettings)
1674 {
1675         struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
1676         u16 fw_speeds = link_info->support_speeds;
1677
1678         BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
1679         fw_speeds = link_info->support_pam4_speeds;
1680         BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
1681
1682         if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
1683                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1684                                                      Pause);
1685                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1686                                                      Asym_Pause);
1687         }
1688
1689         if (link_info->support_auto_speeds ||
1690             link_info->support_pam4_auto_speeds)
1691                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1692                                                      Autoneg);
1693         bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
1694 }
1695
1696 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
1697 {
1698         switch (fw_link_speed) {
1699         case BNXT_LINK_SPEED_100MB:
1700                 return SPEED_100;
1701         case BNXT_LINK_SPEED_1GB:
1702                 return SPEED_1000;
1703         case BNXT_LINK_SPEED_2_5GB:
1704                 return SPEED_2500;
1705         case BNXT_LINK_SPEED_10GB:
1706                 return SPEED_10000;
1707         case BNXT_LINK_SPEED_20GB:
1708                 return SPEED_20000;
1709         case BNXT_LINK_SPEED_25GB:
1710                 return SPEED_25000;
1711         case BNXT_LINK_SPEED_40GB:
1712                 return SPEED_40000;
1713         case BNXT_LINK_SPEED_50GB:
1714                 return SPEED_50000;
1715         case BNXT_LINK_SPEED_100GB:
1716                 return SPEED_100000;
1717         default:
1718                 return SPEED_UNKNOWN;
1719         }
1720 }
1721
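     /* ethtool .get_link_ksettings handler.  Everything is derived from the
      * cached bnxt_link_info under bp->link_lock: supported, advertised and
      * link-partner modes, autoneg state, speed, duplex and the port type
      * (TP, DA or FIBRE) based on the reported media type.
      */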
1722 static int bnxt_get_link_ksettings(struct net_device *dev,
1723                                    struct ethtool_link_ksettings *lk_ksettings)
1724 {
1725         struct bnxt *bp = netdev_priv(dev);
1726         struct bnxt_link_info *link_info = &bp->link_info;
1727         struct ethtool_link_settings *base = &lk_ksettings->base;
1728         u32 ethtool_speed;
1729
1730         ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
1731         mutex_lock(&bp->link_lock);
1732         bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
1733
1734         ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
1735         if (link_info->autoneg) {
1736                 bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
1737                 ethtool_link_ksettings_add_link_mode(lk_ksettings,
1738                                                      advertising, Autoneg);
1739                 base->autoneg = AUTONEG_ENABLE;
1740                 base->duplex = DUPLEX_UNKNOWN;
1741                 if (link_info->phy_link_status == BNXT_LINK_LINK) {
1742                         bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
1743                         if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
1744                                 base->duplex = DUPLEX_FULL;
1745                         else
1746                                 base->duplex = DUPLEX_HALF;
1747                 }
1748                 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
1749         } else {
1750                 base->autoneg = AUTONEG_DISABLE;
1751                 ethtool_speed =
1752                         bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
1753                 base->duplex = DUPLEX_HALF;
1754                 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
1755                         base->duplex = DUPLEX_FULL;
1756         }
1757         base->speed = ethtool_speed;
1758
1759         base->port = PORT_NONE;
1760         if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1761                 base->port = PORT_TP;
1762                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1763                                                      TP);
1764                 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1765                                                      TP);
1766         } else {
1767                 ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
1768                                                      FIBRE);
1769                 ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
1770                                                      FIBRE);
1771
1772                 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
1773                         base->port = PORT_DA;
1774                 else if (link_info->media_type ==
1775                          PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
1776                         base->port = PORT_FIBRE;
1777         }
1778         base->phy_address = link_info->phy_addr;
1779         mutex_unlock(&bp->link_lock);
1780
1781         return 0;
1782 }
1783
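     /* Translate a forced ethtool speed into a firmware forced-speed value.
      * NRZ signaling is preferred when the speed is in the NRZ support mask;
      * 50G and 100G fall back to PAM4, and 200G is PAM4 only.  Returns
      * -EALREADY if the requested speed and signal mode are already forced.
      */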
1784 static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
1785 {
1786         struct bnxt *bp = netdev_priv(dev);
1787         struct bnxt_link_info *link_info = &bp->link_info;
1788         u16 support_pam4_spds = link_info->support_pam4_speeds;
1789         u16 support_spds = link_info->support_speeds;
1790         u8 sig_mode = BNXT_SIG_MODE_NRZ;
1791         u16 fw_speed = 0;
1792
1793         switch (ethtool_speed) {
1794         case SPEED_100:
1795                 if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
1796                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
1797                 break;
1798         case SPEED_1000:
1799                 if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
1800                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
1801                 break;
1802         case SPEED_2500:
1803                 if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
1804                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
1805                 break;
1806         case SPEED_10000:
1807                 if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
1808                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
1809                 break;
1810         case SPEED_20000:
1811                 if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
1812                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
1813                 break;
1814         case SPEED_25000:
1815                 if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
1816                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
1817                 break;
1818         case SPEED_40000:
1819                 if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
1820                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
1821                 break;
1822         case SPEED_50000:
1823                 if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
1824                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
1825                 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
1826                         fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
1827                         sig_mode = BNXT_SIG_MODE_PAM4;
1828                 }
1829                 break;
1830         case SPEED_100000:
1831                 if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
1832                         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
1833                 } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
1834                         fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
1835                         sig_mode = BNXT_SIG_MODE_PAM4;
1836                 }
1837                 break;
1838         case SPEED_200000:
1839                 if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
1840                         fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
1841                         sig_mode = BNXT_SIG_MODE_PAM4;
1842                 }
1843                 break;
1844         }
1845
1846         if (!fw_speed) {
1847                 netdev_err(dev, "unsupported speed!\n");
1848                 return -EINVAL;
1849         }
1850
1851         if (link_info->req_link_speed == fw_speed &&
1852             link_info->req_signal_mode == sig_mode &&
1853             link_info->autoneg == 0)
1854                 return -EALREADY;
1855
1856         link_info->req_link_speed = fw_speed;
1857         link_info->req_signal_mode = sig_mode;
1858         link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
1859         link_info->autoneg = 0;
1860         link_info->advertising = 0;
1861         link_info->advertising_pam4 = 0;
1862
1863         return 0;
1864 }
1865
1866 u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
1867 {
1868         u16 fw_speed_mask = 0;
1869
1870         /* only support autoneg at 100M, 1G, 10G and 40G speeds */
1871         if (advertising & (ADVERTISED_100baseT_Full |
1872                            ADVERTISED_100baseT_Half)) {
1873                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
1874         }
1875         if (advertising & (ADVERTISED_1000baseT_Full |
1876                            ADVERTISED_1000baseT_Half)) {
1877                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
1878         }
1879         if (advertising & ADVERTISED_10000baseT_Full)
1880                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
1881
1882         if (advertising & ADVERTISED_40000baseCR4_Full)
1883                 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
1884
1885         return fw_speed_mask;
1886 }
1887
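     /* ethtool .set_link_ksettings handler.  With autoneg enabled, the
      * requested link modes are converted into the firmware NRZ and PAM4
      * advertisement masks (defaulting to all supported auto speeds when
      * none are given); with autoneg disabled, the speed is forced through
      * bnxt_force_link_speed().  For example, "ethtool -s <dev> speed 25000
      * duplex full autoneg off" takes the forced-speed path.
      */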
1888 static int bnxt_set_link_ksettings(struct net_device *dev,
1889                            const struct ethtool_link_ksettings *lk_ksettings)
1890 {
1891         struct bnxt *bp = netdev_priv(dev);
1892         struct bnxt_link_info *link_info = &bp->link_info;
1893         const struct ethtool_link_settings *base = &lk_ksettings->base;
1894         bool set_pause = false;
1895         u32 speed;
1896         int rc = 0;
1897
1898         if (!BNXT_PHY_CFG_ABLE(bp))
1899                 return -EOPNOTSUPP;
1900
1901         mutex_lock(&bp->link_lock);
1902         if (base->autoneg == AUTONEG_ENABLE) {
1903                 link_info->advertising = 0;
1904                 link_info->advertising_pam4 = 0;
1905                 BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
1906                                         advertising);
1907                 BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
1908                                              lk_ksettings, advertising);
1909                 link_info->autoneg |= BNXT_AUTONEG_SPEED;
1910                 if (!link_info->advertising && !link_info->advertising_pam4) {
1911                         link_info->advertising = link_info->support_auto_speeds;
1912                         link_info->advertising_pam4 =
1913                                 link_info->support_pam4_auto_speeds;
1914                 }
1915                 /* any change to autoneg will cause a link change, therefore
1916                  * the driver should put back the original pause setting
1917                  * when autoneg is enabled */
1918                 if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
1919                         set_pause = true;
1920         } else {
1921                 u8 phy_type = link_info->phy_type;
1922
1923                 if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
1924                     phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
1925                     link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
1926                         netdev_err(dev, "10GBase-T devices must autoneg\n");
1927                         rc = -EINVAL;
1928                         goto set_setting_exit;
1929                 }
1930                 if (base->duplex == DUPLEX_HALF) {
1931                         netdev_err(dev, "HALF DUPLEX is not supported!\n");
1932                         rc = -EINVAL;
1933                         goto set_setting_exit;
1934                 }
1935                 speed = base->speed;
1936                 rc = bnxt_force_link_speed(dev, speed);
1937                 if (rc) {
1938                         if (rc == -EALREADY)
1939                                 rc = 0;
1940                         goto set_setting_exit;
1941                 }
1942         }
1943
1944         if (netif_running(dev))
1945                 rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
1946
1947 set_setting_exit:
1948         mutex_unlock(&bp->link_lock);
1949         return rc;
1950 }
1951
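     /* Report configured and active FEC.  fec_cfg reflects the configured
      * and negotiable encodings (BaseR, RS, LLRS, autoneg), while the
      * PORT_PHY_QCFG active FEC field identifies the clause currently in
      * use on the link.
      */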
1952 static int bnxt_get_fecparam(struct net_device *dev,
1953                              struct ethtool_fecparam *fec)
1954 {
1955         struct bnxt *bp = netdev_priv(dev);
1956         struct bnxt_link_info *link_info;
1957         u8 active_fec;
1958         u16 fec_cfg;
1959
1960         link_info = &bp->link_info;
1961         fec_cfg = link_info->fec_cfg;
1962         active_fec = link_info->active_fec_sig_mode &
1963                      PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
1964         if (fec_cfg & BNXT_FEC_NONE) {
1965                 fec->fec = ETHTOOL_FEC_NONE;
1966                 fec->active_fec = ETHTOOL_FEC_NONE;
1967                 return 0;
1968         }
1969         if (fec_cfg & BNXT_FEC_AUTONEG)
1970                 fec->fec |= ETHTOOL_FEC_AUTO;
1971         if (fec_cfg & BNXT_FEC_ENC_BASE_R)
1972                 fec->fec |= ETHTOOL_FEC_BASER;
1973         if (fec_cfg & BNXT_FEC_ENC_RS)
1974                 fec->fec |= ETHTOOL_FEC_RS;
1975         if (fec_cfg & BNXT_FEC_ENC_LLRS)
1976                 fec->fec |= ETHTOOL_FEC_LLRS;
1977
1978         switch (active_fec) {
1979         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
1980                 fec->active_fec |= ETHTOOL_FEC_BASER;
1981                 break;
1982         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
1983         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
1984         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
1985                 fec->active_fec |= ETHTOOL_FEC_RS;
1986                 break;
1987         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
1988         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
1989                 fec->active_fec |= ETHTOOL_FEC_LLRS;
1990                 break;
1991         case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
1992                 fec->active_fec |= ETHTOOL_FEC_OFF;
1993                 break;
1994         }
1995         return 0;
1996 }
1997
1998 static void bnxt_get_fec_stats(struct net_device *dev,
1999                                struct ethtool_fec_stats *fec_stats)
2000 {
2001         struct bnxt *bp = netdev_priv(dev);
2002         u64 *rx;
2003
2004         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
2005                 return;
2006
2007         rx = bp->rx_port_stats_ext.sw_stats;
2008         fec_stats->corrected_bits.total =
2009                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
2010
2011         if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
2012                 return;
2013
2014         fec_stats->corrected_blocks.total =
2015                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
2016         fec_stats->uncorrectable_blocks.total =
2017                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
2018 }
2019
2020 static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
2021                                          u32 fec)
2022 {
2023         u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
2024
2025         if (fec & ETHTOOL_FEC_BASER)
2026                 fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
2027         else if (fec & ETHTOOL_FEC_RS)
2028                 fw_fec |= BNXT_FEC_RS_ON(link_info);
2029         else if (fec & ETHTOOL_FEC_LLRS)
2030                 fw_fec |= BNXT_FEC_LLRS_ON;
2031         return fw_fec;
2032 }
2033
2034 static int bnxt_set_fecparam(struct net_device *dev,
2035                              struct ethtool_fecparam *fecparam)
2036 {
2037         struct hwrm_port_phy_cfg_input *req;
2038         struct bnxt *bp = netdev_priv(dev);
2039         struct bnxt_link_info *link_info;
2040         u32 new_cfg, fec = fecparam->fec;
2041         u16 fec_cfg;
2042         int rc;
2043
2044         link_info = &bp->link_info;
2045         fec_cfg = link_info->fec_cfg;
2046         if (fec_cfg & BNXT_FEC_NONE)
2047                 return -EOPNOTSUPP;
2048
2049         if (fec & ETHTOOL_FEC_OFF) {
2050                 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
2051                           BNXT_FEC_ALL_OFF(link_info);
2052                 goto apply_fec;
2053         }
2054         if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
2055             ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
2056             ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
2057             ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
2058                 return -EINVAL;
2059
2060         if (fec & ETHTOOL_FEC_AUTO) {
2061                 if (!link_info->autoneg)
2062                         return -EINVAL;
2063                 new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
2064         } else {
2065                 new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
2066         }
2067
2068 apply_fec:
2069         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
2070         if (rc)
2071                 return rc;
2072         req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
2073         rc = hwrm_req_send(bp, req);
2074         /* update current settings */
2075         if (!rc) {
2076                 mutex_lock(&bp->link_lock);
2077                 bnxt_update_link(bp, false);
2078                 mutex_unlock(&bp->link_lock);
2079         }
2080         return rc;
2081 }
2082
2083 static void bnxt_get_pauseparam(struct net_device *dev,
2084                                 struct ethtool_pauseparam *epause)
2085 {
2086         struct bnxt *bp = netdev_priv(dev);
2087         struct bnxt_link_info *link_info = &bp->link_info;
2088
2089         if (BNXT_VF(bp))
2090                 return;
2091         epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
2092         epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
2093         epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
2094 }
2095
2096 static void bnxt_get_pause_stats(struct net_device *dev,
2097                                  struct ethtool_pause_stats *epstat)
2098 {
2099         struct bnxt *bp = netdev_priv(dev);
2100         u64 *rx, *tx;
2101
2102         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
2103                 return;
2104
2105         rx = bp->port_stats.sw_stats;
2106         tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
2107
2108         epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
2109         epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
2110 }
2111
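     /* Pause autoneg can only be enabled when speed autoneg is already on.
      * Switching from autonegotiated to forced pause sets force_link_chng
      * so the next configuration request forces a link change.
      */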
2112 static int bnxt_set_pauseparam(struct net_device *dev,
2113                                struct ethtool_pauseparam *epause)
2114 {
2115         int rc = 0;
2116         struct bnxt *bp = netdev_priv(dev);
2117         struct bnxt_link_info *link_info = &bp->link_info;
2118
2119         if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
2120                 return -EOPNOTSUPP;
2121
2122         mutex_lock(&bp->link_lock);
2123         if (epause->autoneg) {
2124                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
2125                         rc = -EINVAL;
2126                         goto pause_exit;
2127                 }
2128
2129                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
2130                 link_info->req_flow_ctrl = 0;
2131         } else {
2132                 /* when transitioning from auto pause to forced pause,
2133                  * force a link change
2134                  */
2135                 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
2136                         link_info->force_link_chng = true;
2137                 link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
2138                 link_info->req_flow_ctrl = 0;
2139         }
2140         if (epause->rx_pause)
2141                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
2142
2143         if (epause->tx_pause)
2144                 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
2145
2146         if (netif_running(dev))
2147                 rc = bnxt_hwrm_set_pause(bp);
2148
2149 pause_exit:
2150         mutex_unlock(&bp->link_lock);
2151         return rc;
2152 }
2153
2154 static u32 bnxt_get_link(struct net_device *dev)
2155 {
2156         struct bnxt *bp = netdev_priv(dev);
2157
2158         /* TODO: handle MF, VF, driver close case */
2159         return BNXT_LINK_IS_UP(bp);
2160 }
2161
2162 int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
2163                                struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
2164 {
2165         struct hwrm_nvm_get_dev_info_output *resp;
2166         struct hwrm_nvm_get_dev_info_input *req;
2167         int rc;
2168
2169         if (BNXT_VF(bp))
2170                 return -EOPNOTSUPP;
2171
2172         rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO);
2173         if (rc)
2174                 return rc;
2175
2176         resp = hwrm_req_hold(bp, req);
2177         rc = hwrm_req_send(bp, req);
2178         if (!rc)
2179                 memcpy(nvm_dev_info, resp, sizeof(*resp));
2180         hwrm_req_drop(bp, req);
2181         return rc;
2182 }
2183
2184 static void bnxt_print_admin_err(struct bnxt *bp)
2185 {
2186         netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
2187 }
2188
2189 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2190                          u16 ext, u16 *index, u32 *item_length,
2191                          u32 *data_length);
2192
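     /* Write one NVRAM directory entry via HWRM_NVM_WRITE.  The payload is
      * copied into a DMA slice of the request and the command uses the
      * maximum HWRM timeout since flash writes can be slow.  Callers such as
      * bnxt_resize_update_entry() pass no data and a non-zero dir_item_len
      * to resize an entry without writing it.
      */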
2193 int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
2194                      u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
2195                      u32 dir_item_len, const u8 *data,
2196                      size_t data_len)
2197 {
2198         struct bnxt *bp = netdev_priv(dev);
2199         struct hwrm_nvm_write_input *req;
2200         int rc;
2201
2202         rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE);
2203         if (rc)
2204                 return rc;
2205
2206         if (data_len && data) {
2207                 dma_addr_t dma_handle;
2208                 u8 *kmem;
2209
2210                 kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle);
2211                 if (!kmem) {
2212                         hwrm_req_drop(bp, req);
2213                         return -ENOMEM;
2214                 }
2215
2216                 req->dir_data_length = cpu_to_le32(data_len);
2217
2218                 memcpy(kmem, data, data_len);
2219                 req->host_src_addr = cpu_to_le64(dma_handle);
2220         }
2221
2222         hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
2223         req->dir_type = cpu_to_le16(dir_type);
2224         req->dir_ordinal = cpu_to_le16(dir_ordinal);
2225         req->dir_ext = cpu_to_le16(dir_ext);
2226         req->dir_attr = cpu_to_le16(dir_attr);
2227         req->dir_item_length = cpu_to_le32(dir_item_len);
2228         rc = hwrm_req_send(bp, req);
2229
2230         if (rc == -EACCES)
2231                 bnxt_print_admin_err(bp);
2232         return rc;
2233 }
2234
2235 int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
2236                              u8 self_reset, u8 flags)
2237 {
2238         struct bnxt *bp = netdev_priv(dev);
2239         struct hwrm_fw_reset_input *req;
2240         int rc;
2241
2242         if (!bnxt_hwrm_reset_permitted(bp)) {
2243                 netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver\n");
2244                 return -EPERM;
2245         }
2246
2247         rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
2248         if (rc)
2249                 return rc;
2250
2251         req->embedded_proc_type = proc_type;
2252         req->selfrst_status = self_reset;
2253         req->flags = flags;
2254
2255         if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) {
2256                 rc = hwrm_req_send_silent(bp, req);
2257         } else {
2258                 rc = hwrm_req_send(bp, req);
2259                 if (rc == -EACCES)
2260                         bnxt_print_admin_err(bp);
2261         }
2262         return rc;
2263 }
2264
2265 static int bnxt_firmware_reset(struct net_device *dev,
2266                                enum bnxt_nvm_directory_type dir_type)
2267 {
2268         u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE;
2269         u8 proc_type, flags = 0;
2270
2271         /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
2272         /*       (e.g. when firmware isn't already running) */
2273         switch (dir_type) {
2274         case BNX_DIR_TYPE_CHIMP_PATCH:
2275         case BNX_DIR_TYPE_BOOTCODE:
2276         case BNX_DIR_TYPE_BOOTCODE_2:
2277                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
2278                 /* Self-reset ChiMP upon next PCIe reset: */
2279                 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2280                 break;
2281         case BNX_DIR_TYPE_APE_FW:
2282         case BNX_DIR_TYPE_APE_PATCH:
2283                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
2284                 /* Self-reset APE upon next PCIe reset: */
2285                 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
2286                 break;
2287         case BNX_DIR_TYPE_KONG_FW:
2288         case BNX_DIR_TYPE_KONG_PATCH:
2289                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
2290                 break;
2291         case BNX_DIR_TYPE_BONO_FW:
2292         case BNX_DIR_TYPE_BONO_PATCH:
2293                 proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
2294                 break;
2295         default:
2296                 return -EINVAL;
2297         }
2298
2299         return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags);
2300 }
2301
2302 static int bnxt_firmware_reset_chip(struct net_device *dev)
2303 {
2304         struct bnxt *bp = netdev_priv(dev);
2305         u8 flags = 0;
2306
2307         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
2308                 flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
2309
2310         return bnxt_hwrm_firmware_reset(dev,
2311                                         FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
2312                                         FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
2313                                         flags);
2314 }
2315
2316 static int bnxt_firmware_reset_ap(struct net_device *dev)
2317 {
2318         return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP,
2319                                         FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE,
2320                                         0);
2321 }
2322
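     /* Validate and flash an APE-format firmware image.  The bnxt_fw_header
      * signature, code type and device family must match the directory type,
      * and the CRC32 stored in the last 4 bytes must match the checksum
      * computed over the rest of the file.  On success, the corresponding
      * embedded processor is reset so the new image takes effect.
      */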
2323 static int bnxt_flash_firmware(struct net_device *dev,
2324                                u16 dir_type,
2325                                const u8 *fw_data,
2326                                size_t fw_size)
2327 {
2328         int     rc = 0;
2329         u16     code_type;
2330         u32     stored_crc;
2331         u32     calculated_crc;
2332         struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;
2333
2334         switch (dir_type) {
2335         case BNX_DIR_TYPE_BOOTCODE:
2336         case BNX_DIR_TYPE_BOOTCODE_2:
2337                 code_type = CODE_BOOT;
2338                 break;
2339         case BNX_DIR_TYPE_CHIMP_PATCH:
2340                 code_type = CODE_CHIMP_PATCH;
2341                 break;
2342         case BNX_DIR_TYPE_APE_FW:
2343                 code_type = CODE_MCTP_PASSTHRU;
2344                 break;
2345         case BNX_DIR_TYPE_APE_PATCH:
2346                 code_type = CODE_APE_PATCH;
2347                 break;
2348         case BNX_DIR_TYPE_KONG_FW:
2349                 code_type = CODE_KONG_FW;
2350                 break;
2351         case BNX_DIR_TYPE_KONG_PATCH:
2352                 code_type = CODE_KONG_PATCH;
2353                 break;
2354         case BNX_DIR_TYPE_BONO_FW:
2355                 code_type = CODE_BONO_FW;
2356                 break;
2357         case BNX_DIR_TYPE_BONO_PATCH:
2358                 code_type = CODE_BONO_PATCH;
2359                 break;
2360         default:
2361                 netdev_err(dev, "Unsupported directory entry type: %u\n",
2362                            dir_type);
2363                 return -EINVAL;
2364         }
2365         if (fw_size < sizeof(struct bnxt_fw_header)) {
2366                 netdev_err(dev, "Invalid firmware file size: %u\n",
2367                            (unsigned int)fw_size);
2368                 return -EINVAL;
2369         }
2370         if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
2371                 netdev_err(dev, "Invalid firmware signature: %08X\n",
2372                            le32_to_cpu(header->signature));
2373                 return -EINVAL;
2374         }
2375         if (header->code_type != code_type) {
2376                 netdev_err(dev, "Expected firmware type: %d, read: %d\n",
2377                            code_type, header->code_type);
2378                 return -EINVAL;
2379         }
2380         if (header->device != DEVICE_CUMULUS_FAMILY) {
2381                 netdev_err(dev, "Expected firmware device family %d, read: %d\n",
2382                            DEVICE_CUMULUS_FAMILY, header->device);
2383                 return -EINVAL;
2384         }
2385         /* Confirm the CRC32 checksum of the file: */
2386         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2387                                              sizeof(stored_crc)));
2388         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2389         if (calculated_crc != stored_crc) {
2390                 netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
2391                            (unsigned long)stored_crc,
2392                            (unsigned long)calculated_crc);
2393                 return -EINVAL;
2394         }
2395         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2396                               0, 0, 0, fw_data, fw_size);
2397         if (rc == 0)    /* Firmware update successful */
2398                 rc = bnxt_firmware_reset(dev, dir_type);
2399
2400         return rc;
2401 }
2402
2403 static int bnxt_flash_microcode(struct net_device *dev,
2404                                 u16 dir_type,
2405                                 const u8 *fw_data,
2406                                 size_t fw_size)
2407 {
2408         struct bnxt_ucode_trailer *trailer;
2409         u32 calculated_crc;
2410         u32 stored_crc;
2411         int rc = 0;
2412
2413         if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
2414                 netdev_err(dev, "Invalid microcode file size: %u\n",
2415                            (unsigned int)fw_size);
2416                 return -EINVAL;
2417         }
2418         trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
2419                                                 sizeof(*trailer)));
2420         if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
2421                 netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
2422                            le32_to_cpu(trailer->sig));
2423                 return -EINVAL;
2424         }
2425         if (le16_to_cpu(trailer->dir_type) != dir_type) {
2426                 netdev_err(dev, "Expected microcode type: %d, read: %d\n",
2427                            dir_type, le16_to_cpu(trailer->dir_type));
2428                 return -EINVAL;
2429         }
2430         if (le16_to_cpu(trailer->trailer_length) <
2431                 sizeof(struct bnxt_ucode_trailer)) {
2432                 netdev_err(dev, "Invalid microcode trailer length: %d\n",
2433                            le16_to_cpu(trailer->trailer_length));
2434                 return -EINVAL;
2435         }
2436
2437         /* Confirm the CRC32 checksum of the file: */
2438         stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
2439                                              sizeof(stored_crc)));
2440         calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
2441         if (calculated_crc != stored_crc) {
2442                 netdev_err(dev,
2443                            "CRC32 (%08lX) does not match calculated: %08lX\n",
2444                            (unsigned long)stored_crc,
2445                            (unsigned long)calculated_crc);
2446                 return -EINVAL;
2447         }
2448         rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2449                               0, 0, 0, fw_data, fw_size);
2450
2451         return rc;
2452 }
2453
2454 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
2455 {
2456         switch (dir_type) {
2457         case BNX_DIR_TYPE_CHIMP_PATCH:
2458         case BNX_DIR_TYPE_BOOTCODE:
2459         case BNX_DIR_TYPE_BOOTCODE_2:
2460         case BNX_DIR_TYPE_APE_FW:
2461         case BNX_DIR_TYPE_APE_PATCH:
2462         case BNX_DIR_TYPE_KONG_FW:
2463         case BNX_DIR_TYPE_KONG_PATCH:
2464         case BNX_DIR_TYPE_BONO_FW:
2465         case BNX_DIR_TYPE_BONO_PATCH:
2466                 return true;
2467         }
2468
2469         return false;
2470 }
2471
2472 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
2473 {
2474         switch (dir_type) {
2475         case BNX_DIR_TYPE_AVS:
2476         case BNX_DIR_TYPE_EXP_ROM_MBA:
2477         case BNX_DIR_TYPE_PCIE:
2478         case BNX_DIR_TYPE_TSCF_UCODE:
2479         case BNX_DIR_TYPE_EXT_PHY:
2480         case BNX_DIR_TYPE_CCM:
2481         case BNX_DIR_TYPE_ISCSI_BOOT:
2482         case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
2483         case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
2484                 return true;
2485         }
2486
2487         return false;
2488 }
2489
2490 static bool bnxt_dir_type_is_executable(u16 dir_type)
2491 {
2492         return bnxt_dir_type_is_ape_bin_format(dir_type) ||
2493                 bnxt_dir_type_is_other_exec_format(dir_type);
2494 }
2495
2496 static int bnxt_flash_firmware_from_file(struct net_device *dev,
2497                                          u16 dir_type,
2498                                          const char *filename)
2499 {
2500         const struct firmware  *fw;
2501         int                     rc;
2502
2503         rc = request_firmware(&fw, filename, &dev->dev);
2504         if (rc != 0) {
2505                 netdev_err(dev, "Error %d requesting firmware file: %s\n",
2506                            rc, filename);
2507                 return rc;
2508         }
2509         if (bnxt_dir_type_is_ape_bin_format(dir_type))
2510                 rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
2511         else if (bnxt_dir_type_is_other_exec_format(dir_type))
2512                 rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
2513         else
2514                 rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
2515                                       0, 0, 0, fw->data, fw->size);
2516         release_firmware(fw);
2517         return rc;
2518 }
2519
2520 #define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
2521 #define MSG_INVALID_PKG "PKG install error : Invalid package"
2522 #define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
2523 #define MSG_INVALID_DEV "PKG install error : Invalid device"
2524 #define MSG_INTERNAL_ERR "PKG install error : Internal error"
2525 #define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
2526 #define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
2527 #define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
2528 #define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
2529 #define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
2530
2531 static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
2532                                     struct netlink_ext_ack *extack)
2533 {
2534         switch (result) {
2535         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
2536         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
2537         case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
2538         case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
2539         case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
2540         case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
2541                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
2542                 return -EINVAL;
2543         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
2544         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
2545         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
2546         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
2547         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
2548         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
2549         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
2550         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
2551         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
2552         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
2553         case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
2554         case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
2555         case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
2556                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
2557                 return -ENOPKG;
2558         case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
2559                 BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
2560                 return -EPERM;
2561         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
2562         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
2563         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
2564         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
2565         case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
2566                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
2567                 return -EOPNOTSUPP;
2568         default:
2569                 BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
2570                 return -EIO;
2571         }
2572 }
2573
2574 #define BNXT_PKG_DMA_SIZE       0x40000
2575 #define BNXT_NVM_MORE_FLAG      (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
2576 #define BNXT_NVM_LAST_FLAG      (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
2577
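     /* Grow the NVRAM UPDATE entry (rounded up to 4KB) when the package to
      * be flashed is larger than the space currently reserved for it.
      */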
2578 static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
2579                                     struct netlink_ext_ack *extack)
2580 {
2581         u32 item_len;
2582         int rc;
2583
2584         rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
2585                                   BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
2586                                   &item_len, NULL);
2587         if (rc) {
2588                 BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
2589                 return rc;
2590         }
2591
2592         if (fw_size > item_len) {
2593                 rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
2594                                       BNX_DIR_ORDINAL_FIRST, 0, 1,
2595                                       round_up(fw_size, 4096), NULL, 0);
2596                 if (rc) {
2597                         BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
2598                         return rc;
2599                 }
2600         }
2601         return 0;
2602 }
2603
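     /* Flash a complete firmware package: make sure the UPDATE area is big
      * enough, allocate the largest DMA buffer available (halving from
      * BNXT_PKG_DMA_SIZE down to PAGE_SIZE), stream the package into the
      * UPDATE entry with batched HWRM_NVM_MODIFY requests, and then ask the
      * firmware to install it with HWRM_NVM_INSTALL_UPDATE.
      */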
2604 int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
2605                                    u32 install_type, struct netlink_ext_ack *extack)
2606 {
2607         struct hwrm_nvm_install_update_input *install;
2608         struct hwrm_nvm_install_update_output *resp;
2609         struct hwrm_nvm_modify_input *modify;
2610         struct bnxt *bp = netdev_priv(dev);
2611         bool defrag_attempted = false;
2612         dma_addr_t dma_handle;
2613         u8 *kmem = NULL;
2614         u32 modify_len;
2615         u32 item_len;
2616         u8 cmd_err;
2617         u16 index;
2618         int rc;
2619
2620         /* resize before flashing larger image than available space */
2621         rc = bnxt_resize_update_entry(dev, fw->size, extack);
2622         if (rc)
2623                 return rc;
2624
2625         bnxt_hwrm_fw_set_time(bp);
2626
2627         rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
2628         if (rc)
2629                 return rc;
2630
2631         /* Try allocating a large DMA buffer first.  Older fw will
2632          * cause excessive NVRAM erases when using small blocks.
2633          */
2634         modify_len = roundup_pow_of_two(fw->size);
2635         modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE);
2636         while (1) {
2637                 kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle);
2638                 if (!kmem && modify_len > PAGE_SIZE)
2639                         modify_len /= 2;
2640                 else
2641                         break;
2642         }
2643         if (!kmem) {
2644                 hwrm_req_drop(bp, modify);
2645                 return -ENOMEM;
2646         }
2647
2648         rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE);
2649         if (rc) {
2650                 hwrm_req_drop(bp, modify);
2651                 return rc;
2652         }
2653
2654         hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
2655         hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
2656
2657         hwrm_req_hold(bp, modify);
2658         modify->host_src_addr = cpu_to_le64(dma_handle);
2659
2660         resp = hwrm_req_hold(bp, install);
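             /* The install type may arrive packed into the upper 16 bits of
              * the ethtool flash region (see bnxt_flash_device()); when the
              * lower 16 bits are zero, shift it down before passing it to the
              * NVM_INSTALL_UPDATE request.
              */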
2661         if ((install_type & 0xffff) == 0)
2662                 install_type >>= 16;
2663         install->install_type = cpu_to_le32(install_type);
2664
2665         do {
2666                 u32 copied = 0, len = modify_len;
2667
2668                 rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
2669                                           BNX_DIR_ORDINAL_FIRST,
2670                                           BNX_DIR_EXT_NONE,
2671                                           &index, &item_len, NULL);
2672                 if (rc) {
2673                         BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
2674                         break;
2675                 }
2676                 if (fw->size > item_len) {
2677                         BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
2678                         rc = -EFBIG;
2679                         break;
2680                 }
2681
2682                 modify->dir_idx = cpu_to_le16(index);
2683
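                     /* Stream the package to the UPDATE entry in modify_len
                      * sized chunks.  When the image does not fit in a single
                      * DMA buffer, every NVM_MODIFY carries the batch-mode
                      * ("more") flag and the final chunk also sets the "last"
                      * flag to close the batch.
                      */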
2684                 if (fw->size > modify_len)
2685                         modify->flags = BNXT_NVM_MORE_FLAG;
2686                 while (copied < fw->size) {
2687                         u32 balance = fw->size - copied;
2688
2689                         if (balance <= modify_len) {
2690                                 len = balance;
2691                                 if (copied)
2692                                         modify->flags |= BNXT_NVM_LAST_FLAG;
2693                         }
2694                         memcpy(kmem, fw->data + copied, len);
2695                         modify->len = cpu_to_le32(len);
2696                         modify->offset = cpu_to_le32(copied);
2697                         rc = hwrm_req_send(bp, modify);
2698                         if (rc)
2699                                 goto pkg_abort;
2700                         copied += len;
2701                 }
2702
2703                 rc = hwrm_req_send_silent(bp, install);
2704                 if (!rc)
2705                         break;
2706
2707                 if (defrag_attempted) {
2708                         /* We have tried to defragment already in the previous
2709                          * iteration. Return with the result for INSTALL_UPDATE
2710                          */
2711                         break;
2712                 }
2713
2714                 cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
2715
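                     /* If the install fails with FRAG_ERR, retry once with the
                      * ALLOWED_TO_DEFRAG flag.  Should the firmware then report
                      * NO_SPACE, it has cleared the NVM area, so re-create the
                      * UPDATE entry and go through the flash again
                      * (defrag_attempted limits this to a single retry).
                      */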
2716                 switch (cmd_err) {
2717                 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
2718                         BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
2719                         rc = -EALREADY;
2720                         break;
2721                 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
2722                         install->flags =
2723                                 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
2724
2725                         rc = hwrm_req_send_silent(bp, install);
2726                         if (!rc)
2727                                 break;
2728
2729                         cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
2730
2731                         if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
2732                                 /* FW has cleared NVM area, driver will create
2733                                  * UPDATE directory and try the flash again
2734                                  */
2735                                 defrag_attempted = true;
2736                                 install->flags = 0;
2737                                 rc = bnxt_flash_nvram(bp->dev,
2738                                                       BNX_DIR_TYPE_UPDATE,
2739                                                       BNX_DIR_ORDINAL_FIRST,
2740                                                       0, 0, item_len, NULL, 0);
2741                                 if (!rc)
2742                                         break;
2743                         }
2744                         fallthrough;
2745                 default:
2746                         BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
2747                 }
2748         } while (defrag_attempted && !rc);
2749
2750 pkg_abort:
2751         hwrm_req_drop(bp, modify);
2752         hwrm_req_drop(bp, install);
2753
2754         if (resp->result) {
2755                 netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
2756                            (s8)resp->result, (int)resp->problem_item);
2757                 rc = nvm_update_err_to_stderr(dev, resp->result, extack);
2758         }
2759         if (rc == -EACCES)
2760                 bnxt_print_admin_err(bp);
2761         return rc;
2762 }
2763
2764 static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
2765                                         u32 install_type, struct netlink_ext_ack *extack)
2766 {
2767         const struct firmware *fw;
2768         int rc;
2769
2770         rc = request_firmware(&fw, filename, &dev->dev);
2771         if (rc != 0) {
2772                 netdev_err(dev, "PKG error %d requesting file: %s\n",
2773                            rc, filename);
2774                 return rc;
2775         }
2776
2777         rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
2778
2779         release_firmware(fw);
2780
2781         return rc;
2782 }
2783
2784 static int bnxt_flash_device(struct net_device *dev,
2785                              struct ethtool_flash *flash)
2786 {
2787         if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
2788                 netdev_err(dev, "flashdev not supported from a virtual function\n");
2789                 return -EINVAL;
2790         }
2791
2792         if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
2793             flash->region > 0xffff)
2794                 return bnxt_flash_package_from_file(dev, flash->data,
2795                                                     flash->region, NULL);
2796
2797         return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
2798 }
2799
2800 static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
2801 {
2802         struct hwrm_nvm_get_dir_info_output *output;
2803         struct hwrm_nvm_get_dir_info_input *req;
2804         struct bnxt *bp = netdev_priv(dev);
2805         int rc;
2806
2807         rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO);
2808         if (rc)
2809                 return rc;
2810
2811         output = hwrm_req_hold(bp, req);
2812         rc = hwrm_req_send(bp, req);
2813         if (!rc) {
2814                 *entries = le32_to_cpu(output->entries);
2815                 *length = le32_to_cpu(output->entry_length);
2816         }
2817         hwrm_req_drop(bp, req);
2818         return rc;
2819 }
2820
2821 static int bnxt_get_eeprom_len(struct net_device *dev)
2822 {
2823         struct bnxt *bp = netdev_priv(dev);
2824
2825         if (BNXT_VF(bp))
2826                 return 0;
2827
2828         /* The -1 return value allows the entire 32-bit range of offsets to be
2829          * passed via the ethtool command-line utility.
2830          */
2831         return -1;
2832 }
2833
2834 static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
2835 {
2836         struct bnxt *bp = netdev_priv(dev);
2837         int rc;
2838         u32 dir_entries;
2839         u32 entry_length;
2840         u8 *buf;
2841         size_t buflen;
2842         dma_addr_t dma_handle;
2843         struct hwrm_nvm_get_dir_entries_input *req;
2844
2845         rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
2846         if (rc != 0)
2847                 return rc;
2848
2849         if (!dir_entries || !entry_length)
2850                 return -EIO;
2851
2852         /* Insert 2 bytes of directory info (count and size of entries) */
2853         if (len < 2)
2854                 return -EINVAL;
2855
2856         *data++ = dir_entries;
2857         *data++ = entry_length;
2858         len -= 2;
2859         memset(data, 0xff, len);
2860
2861         rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
2862         if (rc)
2863                 return rc;
2864
2865         buflen = dir_entries * entry_length;
2866         buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
2867         if (!buf) {
2868                 hwrm_req_drop(bp, req);
2869                 return -ENOMEM;
2870         }
2871         req->host_dest_addr = cpu_to_le64(dma_handle);
2872
2873         hwrm_req_hold(bp, req); /* hold the slice */
2874         rc = hwrm_req_send(bp, req);
2875         if (rc == 0)
2876                 memcpy(data, buf, len > buflen ? buflen : len);
2877         hwrm_req_drop(bp, req);
2878         return rc;
2879 }
2880
2881 int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
2882                         u32 length, u8 *data)
2883 {
2884         struct bnxt *bp = netdev_priv(dev);
2885         int rc;
2886         u8 *buf;
2887         dma_addr_t dma_handle;
2888         struct hwrm_nvm_read_input *req;
2889
2890         if (!length)
2891                 return -EINVAL;
2892
2893         rc = hwrm_req_init(bp, req, HWRM_NVM_READ);
2894         if (rc)
2895                 return rc;
2896
2897         buf = hwrm_req_dma_slice(bp, req, length, &dma_handle);
2898         if (!buf) {
2899                 hwrm_req_drop(bp, req);
2900                 return -ENOMEM;
2901         }
2902
2903         req->host_dest_addr = cpu_to_le64(dma_handle);
2904         req->dir_idx = cpu_to_le16(index);
2905         req->offset = cpu_to_le32(offset);
2906         req->len = cpu_to_le32(length);
2907
2908         hwrm_req_hold(bp, req); /* hold the slice */
2909         rc = hwrm_req_send(bp, req);
2910         if (rc == 0)
2911                 memcpy(data, buf, length);
2912         hwrm_req_drop(bp, req);
2913         return rc;
2914 }
2915
2916 int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
2917                          u16 ext, u16 *index, u32 *item_length,
2918                          u32 *data_length)
2919 {
2920         struct hwrm_nvm_find_dir_entry_output *output;
2921         struct hwrm_nvm_find_dir_entry_input *req;
2922         struct bnxt *bp = netdev_priv(dev);
2923         int rc;
2924
2925         rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY);
2926         if (rc)
2927                 return rc;
2928
2929         req->enables = 0;
2930         req->dir_idx = 0;
2931         req->dir_type = cpu_to_le16(type);
2932         req->dir_ordinal = cpu_to_le16(ordinal);
2933         req->dir_ext = cpu_to_le16(ext);
2934         req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
2935         output = hwrm_req_hold(bp, req);
2936         rc = hwrm_req_send_silent(bp, req);
2937         if (rc == 0) {
2938                 if (index)
2939                         *index = le16_to_cpu(output->dir_idx);
2940                 if (item_length)
2941                         *item_length = le32_to_cpu(output->dir_item_length);
2942                 if (data_length)
2943                         *data_length = le32_to_cpu(output->dir_data_length);
2944         }
2945         hwrm_req_drop(bp, req);
2946         return rc;
2947 }
2948
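     /* The package log consists of newline-terminated records with
      * tab-separated fields.  Fields are NUL-terminated in place and a
      * pointer to the requested field of the final record is returned, or
      * NULL if the log is empty or the field is absent.
      */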
2949 static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
2950 {
2951         char    *retval = NULL;
2952         char    *p;
2953         char    *value;
2954         int     field = 0;
2955
2956         if (datalen < 1)
2957                 return NULL;
2958         /* null-terminate the log data (removing last '\n'): */
2959         data[datalen - 1] = 0;
2960         for (p = data; *p != 0; p++) {
2961                 field = 0;
2962                 retval = NULL;
2963                 while (*p != 0 && *p != '\n') {
2964                         value = p;
2965                         while (*p != 0 && *p != '\t' && *p != '\n')
2966                                 p++;
2967                         if (field == desired_field)
2968                                 retval = value;
2969                         if (*p != '\t')
2970                                 break;
2971                         *p = 0;
2972                         field++;
2973                         p++;
2974                 }
2975                 if (*p == 0)
2976                         break;
2977                 *p = 0;
2978         }
2979         return retval;
2980 }
2981
2982 int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
2983 {
2984         struct bnxt *bp = netdev_priv(dev);
2985         u16 index = 0;
2986         char *pkgver;
2987         u32 pkglen;
2988         u8 *pkgbuf;
2989         int rc;
2990
2991         rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
2992                                   BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
2993                                   &index, NULL, &pkglen);
2994         if (rc)
2995                 return rc;
2996
2997         pkgbuf = kzalloc(pkglen, GFP_KERNEL);
2998         if (!pkgbuf) {
2999                 dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
3000                         pkglen);
3001                 return -ENOMEM;
3002         }
3003
3004         rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
3005         if (rc)
3006                 goto err;
3007
3008         pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
3009                                    pkglen);
3010         if (pkgver && *pkgver != 0 && isdigit(*pkgver))
3011                 strscpy(ver, pkgver, size);
3012         else
3013                 rc = -ENOENT;
3014
3015 err:
3016         kfree(pkgbuf);
3017
3018         return rc;
3019 }
3020
3021 static void bnxt_get_pkgver(struct net_device *dev)
3022 {
3023         struct bnxt *bp = netdev_priv(dev);
3024         char buf[FW_VER_STR_LEN];
3025         int len;
3026
3027         if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
3028                 len = strlen(bp->fw_ver_str);
3029                 snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
3030                          "/pkg %s", buf);
3031         }
3032 }
3033
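     /* ethtool -e maps its 32-bit offset onto the NVM directory: offset 0
      * returns the directory listing itself, otherwise bits 31:24 hold the
      * 1-based directory index and bits 23:0 the byte offset within that
      * item.
      */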
3034 static int bnxt_get_eeprom(struct net_device *dev,
3035                            struct ethtool_eeprom *eeprom,
3036                            u8 *data)
3037 {
3038         u32 index;
3039         u32 offset;
3040
3041         if (eeprom->offset == 0) /* special offset value to get directory */
3042                 return bnxt_get_nvram_directory(dev, eeprom->len, data);
3043
3044         index = eeprom->offset >> 24;
3045         offset = eeprom->offset & 0xffffff;
3046
3047         if (index == 0) {
3048                 netdev_err(dev, "unsupported index value: %d\n", index);
3049                 return -EINVAL;
3050         }
3051
3052         return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
3053 }
3054
3055 static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
3056 {
3057         struct hwrm_nvm_erase_dir_entry_input *req;
3058         struct bnxt *bp = netdev_priv(dev);
3059         int rc;
3060
3061         rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY);
3062         if (rc)
3063                 return rc;
3064
3065         req->dir_idx = cpu_to_le16(index);
3066         return hwrm_req_send(bp, req);
3067 }
3068
3069 static int bnxt_set_eeprom(struct net_device *dev,
3070                            struct ethtool_eeprom *eeprom,
3071                            u8 *data)
3072 {
3073         struct bnxt *bp = netdev_priv(dev);
3074         u8 index, dir_op;
3075         u16 type, ext, ordinal, attr;
3076
3077         if (!BNXT_PF(bp)) {
3078                 netdev_err(dev, "NVM write not supported from a virtual function\n");
3079                 return -EINVAL;
3080         }
3081
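             /* ethtool -E encodes the operation in the magic value: the upper
              * 16 bits select the directory entry type, with 0xffff reserved
              * for directory maintenance, where bits 15:8 carry the operation
              * (0x0e = erase, confirmed by offset == ~magic) and bits 7:0 the
              * 1-based directory index.
              */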
3082         type = eeprom->magic >> 16;
3083
3084         if (type == 0xffff) { /* special value for directory operations */
3085                 index = eeprom->magic & 0xff;
3086                 dir_op = eeprom->magic >> 8;
3087                 if (index == 0)
3088                         return -EINVAL;
3089                 switch (dir_op) {
3090                 case 0x0e: /* erase */
3091                         if (eeprom->offset != ~eeprom->magic)
3092                                 return -EINVAL;
3093                         return bnxt_erase_nvram_directory(dev, index - 1);
3094                 default:
3095                         return -EINVAL;
3096                 }
3097         }
3098
3099         /* Create or re-write an NVM item: */
3100         if (bnxt_dir_type_is_executable(type))
3101                 return -EOPNOTSUPP;
3102         ext = eeprom->magic & 0xffff;
3103         ordinal = eeprom->offset >> 16;
3104         attr = eeprom->offset & 0xffff;
3105
3106         return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data,
3107                                 eeprom->len);
3108 }
3109
3110 static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
3111 {
3112         struct bnxt *bp = netdev_priv(dev);
3113         struct ethtool_eee *eee = &bp->eee;
3114         struct bnxt_link_info *link_info = &bp->link_info;
3115         u32 advertising;
3116         int rc = 0;
3117
3118         if (!BNXT_PHY_CFG_ABLE(bp))
3119                 return -EOPNOTSUPP;
3120
3121         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3122                 return -EOPNOTSUPP;
3123
3124         mutex_lock(&bp->link_lock);
3125         advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
3126         if (!edata->eee_enabled)
3127                 goto eee_ok;
3128
3129         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
3130                 netdev_warn(dev, "EEE requires autoneg\n");
3131                 rc = -EINVAL;
3132                 goto eee_exit;
3133         }
3134         if (edata->tx_lpi_enabled) {
3135                 if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
3136                                        edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
3137                         netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n",
3138                                     bp->lpi_tmr_lo, bp->lpi_tmr_hi);
3139                         rc = -EINVAL;
3140                         goto eee_exit;
3141                 } else if (!bp->lpi_tmr_hi) {
3142                         edata->tx_lpi_timer = eee->tx_lpi_timer;
3143                 }
3144         }
3145         if (!edata->advertised) {
3146                 edata->advertised = advertising & eee->supported;
3147         } else if (edata->advertised & ~advertising) {
3148                 netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
3149                             edata->advertised, advertising);
3150                 rc = -EINVAL;
3151                 goto eee_exit;
3152         }
3153
3154         eee->advertised = edata->advertised;
3155         eee->tx_lpi_enabled = edata->tx_lpi_enabled;
3156         eee->tx_lpi_timer = edata->tx_lpi_timer;
3157 eee_ok:
3158         eee->eee_enabled = edata->eee_enabled;
3159
3160         if (netif_running(dev))
3161                 rc = bnxt_hwrm_set_link_setting(bp, false, true);
3162
3163 eee_exit:
3164         mutex_unlock(&bp->link_lock);
3165         return rc;
3166 }
3167
3168 static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
3169 {
3170         struct bnxt *bp = netdev_priv(dev);
3171
3172         if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
3173                 return -EOPNOTSUPP;
3174
3175         *edata = bp->eee;
3176         if (!bp->eee.eee_enabled) {
3177                 /* Preserve tx_lpi_timer so that the last value will be used
3178                  * by default when it is re-enabled.
3179                  */
3180                 edata->advertised = 0;
3181                 edata->tx_lpi_enabled = 0;
3182         }
3183
3184         if (!bp->eee.eee_active)
3185                 edata->lp_advertised = 0;
3186
3187         return 0;
3188 }
3189
3190 static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
3191                                             u16 page_number, u8 bank,
3192                                             u16 start_addr, u16 data_length,
3193                                             u8 *buf)
3194 {
3195         struct hwrm_port_phy_i2c_read_output *output;
3196         struct hwrm_port_phy_i2c_read_input *req;
3197         int rc, byte_offset = 0;
3198
3199         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ);
3200         if (rc)
3201                 return rc;
3202
3203         output = hwrm_req_hold(bp, req);
3204         req->i2c_slave_addr = i2c_addr;
3205         req->page_number = cpu_to_le16(page_number);
3206         req->port_id = cpu_to_le16(bp->pf.port_id);
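             /* The firmware returns at most BNXT_MAX_PHY_I2C_RESP_SIZE bytes
              * per request, so read the EEPROM in chunks, enabling the page
              * offset and bank number fields only when they are non-zero.
              */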
3207         do {
3208                 u16 xfer_size;
3209
3210                 xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
3211                 data_length -= xfer_size;
3212                 req->page_offset = cpu_to_le16(start_addr + byte_offset);
3213                 req->data_length = xfer_size;
3214                 req->enables =
3215                         cpu_to_le32((start_addr + byte_offset ?
3216                                      PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
3217                                      0) |
3218                                     (bank ?
3219                                      PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
3220                                      0));
3221                 rc = hwrm_req_send(bp, req);
3222                 if (!rc)
3223                         memcpy(buf + byte_offset, output->data, xfer_size);
3224                 byte_offset += xfer_size;
3225         } while (!rc && data_length > 0);
3226         hwrm_req_drop(bp, req);
3227
3228         return rc;
3229 }
3230
3231 static int bnxt_get_module_info(struct net_device *dev,
3232                                 struct ethtool_modinfo *modinfo)
3233 {
3234         u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
3235         struct bnxt *bp = netdev_priv(dev);
3236         int rc;
3237
3238         /* No point in going further if phy status indicates
3239          * module is not inserted or if it is powered down or
3240          * if it is of type 10GBase-T
3241          */
3242         if (bp->link_info.module_status >
3243                 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
3244                 return -EOPNOTSUPP;
3245
3246         /* This feature is not supported in older firmware versions */
3247         if (bp->hwrm_spec_code < 0x10202)
3248                 return -EOPNOTSUPP;
3249
3250         rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
3251                                               SFF_DIAG_SUPPORT_OFFSET + 1,
3252                                               data);
3253         if (!rc) {
3254                 u8 module_id = data[0];
3255                 u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
3256
3257                 switch (module_id) {
3258                 case SFF_MODULE_ID_SFP:
3259                         modinfo->type = ETH_MODULE_SFF_8472;
3260                         modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
3261                         if (!diag_supported)
3262                                 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3263                         break;
3264                 case SFF_MODULE_ID_QSFP:
3265                 case SFF_MODULE_ID_QSFP_PLUS:
3266                         modinfo->type = ETH_MODULE_SFF_8436;
3267                         modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
3268                         break;
3269                 case SFF_MODULE_ID_QSFP28:
3270                         modinfo->type = ETH_MODULE_SFF_8636;
3271                         modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
3272                         break;
3273                 default:
3274                         rc = -EOPNOTSUPP;
3275                         break;
3276                 }
3277         }
3278         return rc;
3279 }
3280
3281 static int bnxt_get_module_eeprom(struct net_device *dev,
3282                                   struct ethtool_eeprom *eeprom,
3283                                   u8 *data)
3284 {
3285         struct bnxt *bp = netdev_priv(dev);
3286         u16  start = eeprom->offset, length = eeprom->len;
3287         int rc = 0;
3288
3289         memset(data, 0, eeprom->len);
3290
3291         /* Read A0 portion of the EEPROM */
3292         if (start < ETH_MODULE_SFF_8436_LEN) {
3293                 if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
3294                         length = ETH_MODULE_SFF_8436_LEN - start;
3295                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
3296                                                       start, length, data);
3297                 if (rc)
3298                         return rc;
3299                 start += length;
3300                 data += length;
3301                 length = eeprom->len - length;
3302         }
3303
3304         /* Read A2 portion of the EEPROM */
3305         if (length) {
3306                 start -= ETH_MODULE_SFF_8436_LEN;
3307                 rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
3308                                                       start, length, data);
3309         }
3310         return rc;
3311 }
3312
3313 static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
3314 {
3315         if (bp->link_info.module_status <=
3316             PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
3317                 return 0;
3318
3319         switch (bp->link_info.module_status) {
3320         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
3321                 NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
3322                 break;
3323         case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
3324                 NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
3325                 break;
3326         case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
3327                 NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
3328                 break;
3329         default:
3330                 NL_SET_ERR_MSG_MOD(extack, "Unknown error");
3331                 break;
3332         }
3333         return -EINVAL;
3334 }
3335
3336 static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
3337                                           const struct ethtool_module_eeprom *page_data,
3338                                           struct netlink_ext_ack *extack)
3339 {
3340         struct bnxt *bp = netdev_priv(dev);
3341         int rc;
3342
3343         rc = bnxt_get_module_status(bp, extack);
3344         if (rc)
3345                 return rc;
3346
3347         if (bp->hwrm_spec_code < 0x10202) {
3348                 NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
3349                 return -EINVAL;
3350         }
3351
3352         if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
3353                 NL_SET_ERR_MSG_MOD(extack, "Firmware does not support bank selection");
3354                 return -EINVAL;
3355         }
3356
3357         rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
3358                                               page_data->page, page_data->bank,
3359                                               page_data->offset,
3360                                               page_data->length,
3361                                               page_data->data);
3362         if (rc) {
3363                 NL_SET_ERR_MSG_MOD(extack, "Module EEPROM read failed");
3364                 return rc;
3365         }
3366         return page_data->length;
3367 }
3368
3369 static int bnxt_nway_reset(struct net_device *dev)
3370 {
3371         struct bnxt *bp = netdev_priv(dev);
3372         struct bnxt_link_info *link_info = &bp->link_info;
3373         int rc = 0;
3374
3375
3376         if (!BNXT_PHY_CFG_ABLE(bp))
3377                 return -EOPNOTSUPP;
3378
3379         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
3380                 return -EINVAL;
3381
3382         if (netif_running(dev))
3383                 rc = bnxt_hwrm_set_link_setting(bp, true, false);
3384
3385         return rc;
3386 }
3387
3388 static int bnxt_set_phys_id(struct net_device *dev,
3389                             enum ethtool_phys_id_state state)
3390 {
3391         struct hwrm_port_led_cfg_input *req;
3392         struct bnxt *bp = netdev_priv(dev);
3393         struct bnxt_pf_info *pf = &bp->pf;
3394         struct bnxt_led_cfg *led_cfg;
3395         u8 led_state;
3396         __le16 duration;
3397         int rc, i;
3398
3399         if (!bp->num_leds || BNXT_VF(bp))
3400                 return -EOPNOTSUPP;
3401
3402         if (state == ETHTOOL_ID_ACTIVE) {
3403                 led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
3404                 duration = cpu_to_le16(500);
3405         } else if (state == ETHTOOL_ID_INACTIVE) {
3406                 led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
3407                 duration = cpu_to_le16(0);
3408         } else {
3409                 return -EINVAL;
3410         }
3411         rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG);
3412         if (rc)
3413                 return rc;
3414
3415         req->port_id = cpu_to_le16(pf->port_id);
3416         req->num_leds = bp->num_leds;
3417         led_cfg = (struct bnxt_led_cfg *)&req->led0_id;
3418         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3419                 req->enables |= BNXT_LED_DFLT_ENABLES(i);
3420                 led_cfg->led_id = bp->leds[i].led_id;
3421                 led_cfg->led_state = led_state;
3422                 led_cfg->led_blink_on = duration;
3423                 led_cfg->led_blink_off = duration;
3424                 led_cfg->led_group_id = bp->leds[i].led_group_id;
3425         }
3426         return hwrm_req_send(bp, req);
3427 }
3428
3429 static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
3430 {
3431         struct hwrm_selftest_irq_input *req;
3432         int rc;
3433
3434         rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ);
3435         if (rc)
3436                 return rc;
3437
3438         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3439         return hwrm_req_send(bp, req);
3440 }
3441
3442 static int bnxt_test_irq(struct bnxt *bp)
3443 {
3444         int i;
3445
3446         for (i = 0; i < bp->cp_nr_rings; i++) {
3447                 u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
3448                 int rc;
3449
3450                 rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
3451                 if (rc)
3452                         return rc;
3453         }
3454         return 0;
3455 }
3456
3457 static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
3458 {
3459         struct hwrm_port_mac_cfg_input *req;
3460         int rc;
3461
3462         rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
3463         if (rc)
3464                 return rc;
3465
3466         req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
3467         if (enable)
3468                 req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
3469         else
3470                 req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
3471         return hwrm_req_send(bp, req);
3472 }
3473
3474 static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
3475 {
3476         struct hwrm_port_phy_qcaps_output *resp;
3477         struct hwrm_port_phy_qcaps_input *req;
3478         int rc;
3479
3480         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
3481         if (rc)
3482                 return rc;
3483
3484         resp = hwrm_req_hold(bp, req);
3485         rc = hwrm_req_send(bp, req);
3486         if (!rc)
3487                 *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
3488
3489         hwrm_req_drop(bp, req);
3490         return rc;
3491 }
3492
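     /* PHY loopback is run at a forced speed.  Unless autoneg is already off
      * or the firmware supports loopback with autoneg enabled
      * (BNXT_PHY_FL_AN_PHY_LPBK), force the current link speed if the link is
      * up, otherwise the lowest supported forced speed of 10 Gbps or above,
      * falling back to 1 Gbps.
      */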
3493 static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
3494                                     struct hwrm_port_phy_cfg_input *req)
3495 {
3496         struct bnxt_link_info *link_info = &bp->link_info;
3497         u16 fw_advertising;
3498         u16 fw_speed;
3499         int rc;
3500
3501         if (!link_info->autoneg ||
3502             (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK))
3503                 return 0;
3504
3505         rc = bnxt_query_force_speeds(bp, &fw_advertising);
3506         if (rc)
3507                 return rc;
3508
3509         fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
3510         if (BNXT_LINK_IS_UP(bp))
3511                 fw_speed = bp->link_info.link_speed;
3512         else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
3513                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
3514         else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
3515                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
3516         else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
3517                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
3518         else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
3519                 fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
3520
3521         req->force_link_speed = cpu_to_le16(fw_speed);
3522         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
3523                                   PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
3524         rc = hwrm_req_send(bp, req);
3525         req->flags = 0;
3526         req->force_link_speed = cpu_to_le16(0);
3527         return rc;
3528 }
3529
3530 static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
3531 {
3532         struct hwrm_port_phy_cfg_input *req;
3533         int rc;
3534
3535         rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
3536         if (rc)
3537                 return rc;
3538
3539         /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
3540         hwrm_req_hold(bp, req);
3541
3542         if (enable) {
3543                 bnxt_disable_an_for_lpbk(bp, req);
3544                 if (ext)
3545                         req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
3546                 else
3547                         req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
3548         } else {
3549                 req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
3550         }
3551         req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
3552         rc = hwrm_req_send(bp, req);
3553         hwrm_req_drop(bp, req);
3554         return rc;
3555 }
3556
3557 static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3558                             u32 raw_cons, int pkt_size)
3559 {
3560         struct bnxt_napi *bnapi = cpr->bnapi;
3561         struct bnxt_rx_ring_info *rxr;
3562         struct bnxt_sw_rx_bd *rx_buf;
3563         struct rx_cmp *rxcmp;
3564         u16 cp_cons, cons;
3565         u8 *data;
3566         u32 len;
3567         int i;
3568
3569         rxr = bnapi->rx_ring;
3570         cp_cons = RING_CMP(raw_cons);
3571         rxcmp = (struct rx_cmp *)
3572                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
3573         cons = rxcmp->rx_cmp_opaque;
3574         rx_buf = &rxr->rx_buf_ring[cons];
3575         data = rx_buf->data_ptr;
3576         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
3577         if (len != pkt_size)
3578                 return -EIO;
3579         i = ETH_ALEN;
3580         if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
3581                 return -EIO;
3582         i += ETH_ALEN;
3583                 for ( ; i < pkt_size; i++) {
3584                 if (data[i] != (u8)(i & 0xff))
3585                         return -EIO;
3586         }
3587         return 0;
3588 }
3589
3590 static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
3591                               int pkt_size)
3592 {
3593         struct tx_cmp *txcmp;
3594         int rc = -EIO;
3595         u32 raw_cons;
3596         u32 cons;
3597         int i;
3598
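             /* Poll the completion ring (up to 200 tries, 5 usec apart) for
              * the RX completion of the looped-back frame, then verify its
              * contents via bnxt_rx_loopback().
              */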
3599         raw_cons = cpr->cp_raw_cons;
3600         for (i = 0; i < 200; i++) {
3601                 cons = RING_CMP(raw_cons);
3602                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
3603
3604                 if (!TX_CMP_VALID(txcmp, raw_cons)) {
3605                         udelay(5);
3606                         continue;
3607                 }
3608
3609                 /* The valid bit of the entry must be checked before
3610                  * reading any other fields.
3611                  */
3612                 dma_rmb();
3613                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
3614                         rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
3615                         raw_cons = NEXT_RAW_CMP(raw_cons);
3616                         raw_cons = NEXT_RAW_CMP(raw_cons);
3617                         break;
3618                 }
3619                 raw_cons = NEXT_RAW_CMP(raw_cons);
3620         }
3621         cpr->cp_raw_cons = raw_cons;
3622         return rc;
3623 }
3624
3625 static int bnxt_run_loopback(struct bnxt *bp)
3626 {
3627         struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
3628         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
3629         struct bnxt_cp_ring_info *cpr;
3630         int pkt_size, i = 0;
3631         struct sk_buff *skb;
3632         dma_addr_t map;
3633         u8 *data;
3634         int rc;
3635
3636         cpr = &rxr->bnapi->cp_ring;
3637         if (bp->flags & BNXT_FLAG_CHIP_P5)
3638                 cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
3639         pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
3640         skb = netdev_alloc_skb(bp->dev, pkt_size);
3641         if (!skb)
3642                 return -ENOMEM;
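             /* Build a self-addressed test frame: destination and source MAC
              * are both the port's own address, followed by an incrementing
              * byte pattern that bnxt_rx_loopback() checks on the receive
              * side.
              */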
3643         data = skb_put(skb, pkt_size);
3644         ether_addr_copy(&data[i], bp->dev->dev_addr);
3645         i += ETH_ALEN;
3646         ether_addr_copy(&data[i], bp->dev->dev_addr);
3647         i += ETH_ALEN;
3648         for ( ; i < pkt_size; i++)
3649                 data[i] = (u8)(i & 0xff);
3650
3651         map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
3652                              DMA_TO_DEVICE);
3653         if (dma_mapping_error(&bp->pdev->dev, map)) {
3654                 dev_kfree_skb(skb);
3655                 return -EIO;
3656         }
3657         bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
3658
3659         /* Sync BD data before updating doorbell */
3660         wmb();
3661
3662         bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
3663         rc = bnxt_poll_loopback(bp, cpr, pkt_size);
3664
3665         dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
3666         dev_kfree_skb(skb);
3667         return rc;
3668 }
3669
3670 static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
3671 {
3672         struct hwrm_selftest_exec_output *resp;
3673         struct hwrm_selftest_exec_input *req;
3674         int rc;
3675
3676         rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC);
3677         if (rc)
3678                 return rc;
3679
3680         hwrm_req_timeout(bp, req, bp->test_info->timeout);
3681         req->flags = test_mask;
3682
3683         resp = hwrm_req_hold(bp, req);
3684         rc = hwrm_req_send(bp, req);
3685         *test_results = resp->test_success;
3686         hwrm_req_drop(bp, req);
3687         return rc;
3688 }
3689
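     /* The driver appends four tests of its own after the firmware self-test
      * list reported by HWRM_SELFTEST_QLIST, so their positions in the
      * ethtool test array are derived from bp->num_tests.
      */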
3690 #define BNXT_DRV_TESTS                  4
3691 #define BNXT_MACLPBK_TEST_IDX           (bp->num_tests - BNXT_DRV_TESTS)
3692 #define BNXT_PHYLPBK_TEST_IDX           (BNXT_MACLPBK_TEST_IDX + 1)
3693 #define BNXT_EXTLPBK_TEST_IDX           (BNXT_MACLPBK_TEST_IDX + 2)
3694 #define BNXT_IRQ_TEST_IDX               (BNXT_MACLPBK_TEST_IDX + 3)
3695
3696 static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
3697                            u64 *buf)
3698 {
3699         struct bnxt *bp = netdev_priv(dev);
3700         bool do_ext_lpbk = false;
3701         bool offline = false;
3702         u8 test_results = 0;
3703         u8 test_mask = 0;
3704         int rc = 0, i;
3705
3706         if (!bp->num_tests || !BNXT_PF(bp))
3707                 return;
3708         memset(buf, 0, sizeof(u64) * bp->num_tests);
3709         if (!netif_running(dev)) {
3710                 etest->flags |= ETH_TEST_FL_FAILED;
3711                 return;
3712         }
3713
3714         if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
3715             (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK))
3716                 do_ext_lpbk = true;
3717
3718         if (etest->flags & ETH_TEST_FL_OFFLINE) {
3719                 if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
3720                         etest->flags |= ETH_TEST_FL_FAILED;
3721                         netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
3722                         return;
3723                 }
3724                 offline = true;
3725         }
3726
3727         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3728                 u8 bit_val = 1 << i;
3729
3730                 if (!(bp->test_info->offline_mask & bit_val))
3731                         test_mask |= bit_val;
3732                 else if (offline)
3733                         test_mask |= bit_val;
3734         }
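             /* Online tests run against the live interface.  Offline tests
              * close the NIC, run the firmware tests, exercise MAC, PHY and
              * (optionally) external loopback in the half-open state, and
              * then reopen the device.
              */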
3735         if (!offline) {
3736                 bnxt_run_fw_tests(bp, test_mask, &test_results);
3737         } else {
3738                 bnxt_ulp_stop(bp);
3739                 rc = bnxt_close_nic(bp, true, false);
3740                 if (rc) {
3741                         bnxt_ulp_start(bp, rc);
3742                         return;
3743                 }
3744                 bnxt_run_fw_tests(bp, test_mask, &test_results);
3745
3746                 buf[BNXT_MACLPBK_TEST_IDX] = 1;
3747                 bnxt_hwrm_mac_loopback(bp, true);
3748                 msleep(250);
3749                 rc = bnxt_half_open_nic(bp);
3750                 if (rc) {
3751                         bnxt_hwrm_mac_loopback(bp, false);
3752                         etest->flags |= ETH_TEST_FL_FAILED;
3753                         bnxt_ulp_start(bp, rc);
3754                         return;
3755                 }
3756                 if (bnxt_run_loopback(bp))
3757                         etest->flags |= ETH_TEST_FL_FAILED;
3758                 else
3759                         buf[BNXT_MACLPBK_TEST_IDX] = 0;
3760
3761                 bnxt_hwrm_mac_loopback(bp, false);
3762                 bnxt_hwrm_phy_loopback(bp, true, false);
3763                 msleep(1000);
3764                 if (bnxt_run_loopback(bp)) {
3765                         buf[BNXT_PHYLPBK_TEST_IDX] = 1;
3766                         etest->flags |= ETH_TEST_FL_FAILED;
3767                 }
3768                 if (do_ext_lpbk) {
3769                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
3770                         bnxt_hwrm_phy_loopback(bp, true, true);
3771                         msleep(1000);
3772                         if (bnxt_run_loopback(bp)) {
3773                                 buf[BNXT_EXTLPBK_TEST_IDX] = 1;
3774                                 etest->flags |= ETH_TEST_FL_FAILED;
3775                         }
3776                 }
3777                 bnxt_hwrm_phy_loopback(bp, false, false);
3778                 bnxt_half_close_nic(bp);
3779                 rc = bnxt_open_nic(bp, true, true);
3780                 bnxt_ulp_start(bp, rc);
3781         }
3782         if (rc || bnxt_test_irq(bp)) {
3783                 buf[BNXT_IRQ_TEST_IDX] = 1;
3784                 etest->flags |= ETH_TEST_FL_FAILED;
3785         }
3786         for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
3787                 u8 bit_val = 1 << i;
3788
3789                 if ((test_mask & bit_val) && !(test_results & bit_val)) {
3790                         buf[i] = 1;
3791                         etest->flags |= ETH_TEST_FL_FAILED;
3792                 }
3793         }
3794 }
3795
3796 static int bnxt_reset(struct net_device *dev, u32 *flags)
3797 {
3798         struct bnxt *bp = netdev_priv(dev);
3799         bool reload = false;
3800         u32 req = *flags;
3801
3802         if (!req)
3803                 return -EINVAL;
3804
3805         if (!BNXT_PF(bp)) {
3806                 netdev_err(dev, "Reset is not supported from a VF\n");
3807                 return -EOPNOTSUPP;
3808         }
3809
3810         if (pci_vfs_assigned(bp->pdev) &&
3811             !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
3812                 netdev_err(dev,
3813                            "Reset not allowed when VFs are assigned to VMs\n");
3814                 return -EBUSY;
3815         }
3816
3817         if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) {
3818                 /* This feature is not supported in older firmware versions */
3819                 if (bp->hwrm_spec_code >= 0x10803) {
3820                         if (!bnxt_firmware_reset_chip(dev)) {
3821                                 netdev_info(dev, "Firmware reset request successful.\n");
3822                                 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
3823                                         reload = true;
3824                                 *flags &= ~BNXT_FW_RESET_CHIP;
3825                         }
3826                 } else if (req == BNXT_FW_RESET_CHIP) {
3827                         return -EOPNOTSUPP; /* only request, fail hard */
3828                 }
3829         }
3830
3831         if (req & BNXT_FW_RESET_AP) {
3832                 /* This feature is not supported in older firmware versions */
3833                 if (bp->hwrm_spec_code >= 0x10803) {
3834                         if (!bnxt_firmware_reset_ap(dev)) {
3835                                 netdev_info(dev, "Application processor reset successful.\n");
3836                                 reload = true;
3837                                 *flags &= ~BNXT_FW_RESET_AP;
3838                         }
3839                 } else if (req == BNXT_FW_RESET_AP) {
3840                         return -EOPNOTSUPP; /* only request, fail hard */
3841                 }
3842         }
3843
3844         if (reload)
3845                 netdev_info(dev, "Reload driver to complete reset\n");
3846
3847         return 0;
3848 }
3849
3850 static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
3851 {
3852         struct bnxt *bp = netdev_priv(dev);
3853
3854         if (dump->flag > BNXT_DUMP_CRASH) {
3855                 netdev_info(dev, "Supports only Live (0) and Crash (1) dumps.\n");
3856                 return -EINVAL;
3857         }
3858
3859         if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
3860                 netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
3861                 return -EOPNOTSUPP;
3862         }
3863
3864         bp->dump_flag = dump->flag;
3865         return 0;
3866 }
3867
3868 static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
3869 {
3870         struct bnxt *bp = netdev_priv(dev);
3871
3872         if (bp->hwrm_spec_code < 0x10801)
3873                 return -EOPNOTSUPP;
3874
3875         dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
3876                         bp->ver_resp.hwrm_fw_min_8b << 16 |
3877                         bp->ver_resp.hwrm_fw_bld_8b << 8 |
3878                         bp->ver_resp.hwrm_fw_rsvd_8b;
3879
3880         dump->flag = bp->dump_flag;
3881         dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
3882         return 0;
3883 }
3884
3885 static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
3886                               void *buf)
3887 {
3888         struct bnxt *bp = netdev_priv(dev);
3889
3890         if (bp->hwrm_spec_code < 0x10801)
3891                 return -EOPNOTSUPP;
3892
3893         memset(buf, 0, dump->len);
3894
3895         dump->flag = bp->dump_flag;
3896         return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
3897 }
3898
3899 static int bnxt_get_ts_info(struct net_device *dev,
3900                             struct ethtool_ts_info *info)
3901 {
3902         struct bnxt *bp = netdev_priv(dev);
3903         struct bnxt_ptp_cfg *ptp;
3904
3905         ptp = bp->ptp_cfg;
3906         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
3907                                 SOF_TIMESTAMPING_RX_SOFTWARE |
3908                                 SOF_TIMESTAMPING_SOFTWARE;
3909
3910         info->phc_index = -1;
3911         if (!ptp)
3912                 return 0;
3913
3914         info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
3915                                  SOF_TIMESTAMPING_RX_HARDWARE |
3916                                  SOF_TIMESTAMPING_RAW_HARDWARE;
3917         if (ptp->ptp_clock)
3918                 info->phc_index = ptp_clock_index(ptp->ptp_clock);
3919
3920         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
3921
3922         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
3923                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
3924                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
3925
3926         if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
3927                 info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
3928         return 0;
3929 }
3930
3931 void bnxt_ethtool_init(struct bnxt *bp)
3932 {
3933         struct hwrm_selftest_qlist_output *resp;
3934         struct hwrm_selftest_qlist_input *req;
3935         struct bnxt_test_info *test_info;
3936         struct net_device *dev = bp->dev;
3937         int i, rc;
3938
3939         if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
3940                 bnxt_get_pkgver(dev);
3941
3942         bp->num_tests = 0;
3943         if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
3944                 return;
3945
3946         test_info = bp->test_info;
3947         if (!test_info) {
3948                 test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
3949                 if (!test_info)
3950                         return;
3951                 bp->test_info = test_info;
3952         }
3953
3954         if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST))
3955                 return;
3956
3957         resp = hwrm_req_hold(bp, req);
3958         rc = hwrm_req_send_silent(bp, req);
3959         if (rc)
3960                 goto ethtool_init_exit;
3961
3962         bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
3963         if (bp->num_tests > BNXT_MAX_TEST)
3964                 bp->num_tests = BNXT_MAX_TEST;
3965
3966         test_info->offline_mask = resp->offline_tests;
3967         test_info->timeout = le16_to_cpu(resp->test_timeout);
3968         if (!test_info->timeout)
3969                 test_info->timeout = HWRM_CMD_TIMEOUT;
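             /* The last BNXT_DRV_TESTS entries use fixed driver-defined
              * names; names reported by the firmware are formatted with
              * snprintf() so the resulting string stays within
              * ETH_GSTRING_LEN.
              */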
3970         for (i = 0; i < bp->num_tests; i++) {
3971                 char *str = test_info->string[i];
3972                 char *fw_str = resp->test_name[i];
3973
3974                 if (i == BNXT_MACLPBK_TEST_IDX) {
3975                         strcpy(str, "Mac loopback test (offline)");
3976                 } else if (i == BNXT_PHYLPBK_TEST_IDX) {
3977                         strcpy(str, "Phy loopback test (offline)");
3978                 } else if (i == BNXT_EXTLPBK_TEST_IDX) {
3979                         strcpy(str, "Ext loopback test (offline)");
3980                 } else if (i == BNXT_IRQ_TEST_IDX) {
3981                         strcpy(str, "Interrupt test (offline)");
3982                 } else {
3983                         snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
3984                                  fw_str, test_info->offline_mask & (1 << i) ?
3985                                         "offline" : "online");
3986                 }
3987         }
3988
3989 ethtool_init_exit:
3990         hwrm_req_drop(bp, req);
3991 }
3992
3993 static void bnxt_get_eth_phy_stats(struct net_device *dev,
3994                                    struct ethtool_eth_phy_stats *phy_stats)
3995 {
3996         struct bnxt *bp = netdev_priv(dev);
3997         u64 *rx;
3998
3999         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4000                 return;
4001
4002         rx = bp->rx_port_stats_ext.sw_stats;
4003         phy_stats->SymbolErrorDuringCarrier =
4004                 *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err));
4005 }
4006
4007 static void bnxt_get_eth_mac_stats(struct net_device *dev,
4008                                    struct ethtool_eth_mac_stats *mac_stats)
4009 {
4010         struct bnxt *bp = netdev_priv(dev);
4011         u64 *rx, *tx;
4012
4013         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4014                 return;
4015
4016         rx = bp->port_stats.sw_stats;
4017         tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4018
4019         mac_stats->FramesReceivedOK =
4020                 BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames);
4021         mac_stats->FramesTransmittedOK =
4022                 BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames);
4023         mac_stats->FrameCheckSequenceErrors =
4024                 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
4025         mac_stats->AlignmentErrors =
4026                 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
4027         mac_stats->OutOfRangeLengthField =
4028                 BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames);
4029 }
4030
4031 static void bnxt_get_eth_ctrl_stats(struct net_device *dev,
4032                                     struct ethtool_eth_ctrl_stats *ctrl_stats)
4033 {
4034         struct bnxt *bp = netdev_priv(dev);
4035         u64 *rx;
4036
4037         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4038                 return;
4039
4040         rx = bp->port_stats.sw_stats;
4041         ctrl_stats->MACControlFramesReceived =
4042                 BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames);
4043 }
4044
4045 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = {
4046         {    0,    64 },
4047         {   65,   127 },
4048         {  128,   255 },
4049         {  256,   511 },
4050         {  512,  1023 },
4051         { 1024,  1518 },
4052         { 1519,  2047 },
4053         { 2048,  4095 },
4054         { 4096,  9216 },
4055         { 9217, 16383 },
4056         {}
4057 };
4058
4059 static void bnxt_get_rmon_stats(struct net_device *dev,
4060                                 struct ethtool_rmon_stats *rmon_stats,
4061                                 const struct ethtool_rmon_hist_range **ranges)
4062 {
4063         struct bnxt *bp = netdev_priv(dev);
4064         u64 *rx, *tx;
4065
4066         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
4067                 return;
4068
4069         rx = bp->port_stats.sw_stats;
4070         tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4071
4072         rmon_stats->jabbers =
4073                 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
4074         rmon_stats->oversize_pkts =
4075                 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames);
4076         rmon_stats->undersize_pkts =
4077                 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames);
4078
4079         rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames);
4080         rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames);
4081         rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames);
4082         rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames);
4083         rmon_stats->hist[4] =
4084                 BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames);
4085         rmon_stats->hist[5] =
4086                 BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames);
4087         rmon_stats->hist[6] =
4088                 BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames);
4089         rmon_stats->hist[7] =
4090                 BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames);
4091         rmon_stats->hist[8] =
4092                 BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames);
4093         rmon_stats->hist[9] =
4094                 BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames);
4095
4096         rmon_stats->hist_tx[0] =
4097                 BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames);
4098         rmon_stats->hist_tx[1] =
4099                 BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames);
4100         rmon_stats->hist_tx[2] =
4101                 BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames);
4102         rmon_stats->hist_tx[3] =
4103                 BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames);
4104         rmon_stats->hist_tx[4] =
4105                 BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames);
4106         rmon_stats->hist_tx[5] =
4107                 BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames);
4108         rmon_stats->hist_tx[6] =
4109                 BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames);
4110         rmon_stats->hist_tx[7] =
4111                 BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames);
4112         rmon_stats->hist_tx[8] =
4113                 BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames);
4114         rmon_stats->hist_tx[9] =
4115                 BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames);
4116
4117         *ranges = bnxt_rmon_ranges;
4118 }
4119
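/* Report the link-down event count from the extended rx port statistics. */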
4120 static void bnxt_get_link_ext_stats(struct net_device *dev,
4121                                     struct ethtool_link_ext_stats *stats)
4122 {
4123         struct bnxt *bp = netdev_priv(dev);
4124         u64 *rx;
4125
4126         if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
4127                 return;
4128
4129         rx = bp->rx_port_stats_ext.sw_stats;
4130         stats->link_down_events =
4131                 *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
4132 }
4133
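/* Free the self-test metadata allocated by bnxt_ethtool_init(). */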
4134 void bnxt_ethtool_free(struct bnxt *bp)
4135 {
4136         kfree(bp->test_info);
4137         bp->test_info = NULL;
4138 }
4139
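/* ethtool operations exported by the bnxt_en driver. */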
4140 const struct ethtool_ops bnxt_ethtool_ops = {
4141         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
4142                                      ETHTOOL_COALESCE_MAX_FRAMES |
4143                                      ETHTOOL_COALESCE_USECS_IRQ |
4144                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
4145                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS |
4146                                      ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
4147                                      ETHTOOL_COALESCE_USE_CQE,
4148         .get_link_ksettings     = bnxt_get_link_ksettings,
4149         .set_link_ksettings     = bnxt_set_link_ksettings,
4150         .get_fec_stats          = bnxt_get_fec_stats,
4151         .get_fecparam           = bnxt_get_fecparam,
4152         .set_fecparam           = bnxt_set_fecparam,
4153         .get_pause_stats        = bnxt_get_pause_stats,
4154         .get_pauseparam         = bnxt_get_pauseparam,
4155         .set_pauseparam         = bnxt_set_pauseparam,
4156         .get_drvinfo            = bnxt_get_drvinfo,
4157         .get_regs_len           = bnxt_get_regs_len,
4158         .get_regs               = bnxt_get_regs,
4159         .get_wol                = bnxt_get_wol,
4160         .set_wol                = bnxt_set_wol,
4161         .get_coalesce           = bnxt_get_coalesce,
4162         .set_coalesce           = bnxt_set_coalesce,
4163         .get_msglevel           = bnxt_get_msglevel,
4164         .set_msglevel           = bnxt_set_msglevel,
4165         .get_sset_count         = bnxt_get_sset_count,
4166         .get_strings            = bnxt_get_strings,
4167         .get_ethtool_stats      = bnxt_get_ethtool_stats,
4168         .set_ringparam          = bnxt_set_ringparam,
4169         .get_ringparam          = bnxt_get_ringparam,
4170         .get_channels           = bnxt_get_channels,
4171         .set_channels           = bnxt_set_channels,
4172         .get_rxnfc              = bnxt_get_rxnfc,
4173         .set_rxnfc              = bnxt_set_rxnfc,
4174         .get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
4175         .get_rxfh_key_size      = bnxt_get_rxfh_key_size,
4176         .get_rxfh               = bnxt_get_rxfh,
4177         .set_rxfh               = bnxt_set_rxfh,
4178         .flash_device           = bnxt_flash_device,
4179         .get_eeprom_len         = bnxt_get_eeprom_len,
4180         .get_eeprom             = bnxt_get_eeprom,
4181         .set_eeprom             = bnxt_set_eeprom,
4182         .get_link               = bnxt_get_link,
4183         .get_link_ext_stats     = bnxt_get_link_ext_stats,
4184         .get_eee                = bnxt_get_eee,
4185         .set_eee                = bnxt_set_eee,
4186         .get_module_info        = bnxt_get_module_info,
4187         .get_module_eeprom      = bnxt_get_module_eeprom,
4188         .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
4189         .nway_reset             = bnxt_nway_reset,
4190         .set_phys_id            = bnxt_set_phys_id,
4191         .self_test              = bnxt_self_test,
4192         .get_ts_info            = bnxt_get_ts_info,
4193         .reset                  = bnxt_reset,
4194         .set_dump               = bnxt_set_dump,
4195         .get_dump_flag          = bnxt_get_dump_flag,
4196         .get_dump_data          = bnxt_get_dump_data,
4197         .get_eth_phy_stats      = bnxt_get_eth_phy_stats,
4198         .get_eth_mac_stats      = bnxt_get_eth_mac_stats,
4199         .get_eth_ctrl_stats     = bnxt_get_eth_ctrl_stats,
4200         .get_rmon_stats         = bnxt_get_rmon_stats,
4201 };