/*
 * Copyright (C) 2015 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_ethtool.c
 * Netronome network device driver: ethtool support
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/ethtool.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"

/* Support for stats. Returns netdev, driver, and device stats */
enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS };
struct _nfp_net_et_stats {
	char name[ETH_GSTRING_LEN];
	int type;	/* stat source: netdev, driver or device */
	int sz;		/* size of the counter */
	int off;	/* offset within the source structure or control BAR */
};

#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS,			\
		FIELD_SIZEOF(struct net_device_stats, m),	\
		offsetof(struct net_device_stats, m)
/* For stats in the control BAR (other than Q stats) */
#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS,			\
		sizeof(u64),					\
		(m)
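/* Both helpers above expand to the type/size/offset triple expected by
 * struct _nfp_net_et_stats, e.g. NN_ET_NETDEV_STAT(rx_packets) describes
 * the rx_packets member of struct net_device_stats, while NN_ET_DEV_STAT()
 * takes the 64-bit counter's offset in the control BAR directly.
 */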
static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
	{"rx_packets", NN_ET_NETDEV_STAT(rx_packets)},
	{"tx_packets", NN_ET_NETDEV_STAT(tx_packets)},
	{"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)},
	{"rx_errors", NN_ET_NETDEV_STAT(rx_errors)},
	{"tx_errors", NN_ET_NETDEV_STAT(tx_errors)},
	{"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)},
	{"multicast", NN_ET_NETDEV_STAT(multicast)},
	{"collisions", NN_ET_NETDEV_STAT(collisions)},
	{"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)},
	{"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)},
	/* Stats from the device */
	{"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)},
	{"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)},
	{"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)},
	{"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)},
	{"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)},
	{"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)},
	{"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)},
	{"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)},
	{"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)},
101 {"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)},
102 {"dev_tx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)},
103 {"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)},
104 {"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)},
105 {"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)},
106 {"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)},
107 {"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)},
108 {"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)},
109 {"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)},
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7
#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
			 NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
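
/* The ethtool stats arrays are laid out as: the nfp_net_et_stats[] entries
 * above, then three counters per ring vector, then the seven gathered
 * per-ring-vector counters, then packet/byte pairs for every TX and RX queue.
 * nfp_net_get_strings() and nfp_net_get_stats() below must emit entries in
 * this same order.
 */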

static void nfp_net_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct nfp_net *nn = netdev_priv(netdev);

	strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, nfp_net_driver_version,
		sizeof(drvinfo->version));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d.%d",
		 nn->fw_ver.resv, nn->fw_ver.class,
		 nn->fw_ver.major, nn->fw_ver.minor);
	strlcpy(drvinfo->bus_info, pci_name(nn->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_stats = NN_ET_STATS_LEN;
	drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ;
}

static void nfp_net_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);

	ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
	ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
	ring->rx_pending = nn->rxd_cnt;
	ring->tx_pending = nn->txd_cnt;
}
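
/* Apply ring sizes requested via `ethtool -G`; the requested counts are
 * rounded up to the next power of two and must stay within the device's
 * minimum/maximum descriptor limits.
 */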
static int nfp_net_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 rxd_cnt, txd_cnt;

	/* We don't have separate queues/rings for small/large frames. */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	/* Round up to supported values */
	rxd_cnt = roundup_pow_of_two(ring->rx_pending);
	txd_cnt = roundup_pow_of_two(ring->tx_pending);

	if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
	    txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
		return -EINVAL;

	if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
		return 0;

	nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
	       nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);

	return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}

static void nfp_net_get_strings(struct net_device *netdev,
				u32 stringset, u8 *data)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
			memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_r_vecs; i++) {
			sprintf(p, "rvec_%u_rx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rvec_%u_tx_busy", i);
			p += ETH_GSTRING_LEN;
		}
		strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_gather", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		strncpy(p, "tx_lso", ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
		for (i = 0; i < nn->num_tx_rings; i++) {
			sprintf(p, "txq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "txq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < nn->num_rx_rings; i++) {
			sprintf(p, "rxq_%u_pkts", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rxq_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
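
/* Fill the u64 array handed to user space by `ethtool -S`; entries must stay
 * in the exact order in which nfp_net_get_strings() emitted the names.
 */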
static void nfp_net_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	struct rtnl_link_stats64 *netdev_stats;
	struct rtnl_link_stats64 temp = {};
	u64 tmp[NN_ET_RVEC_GATHER_STATS];
	u8 __iomem *io_p;
	int i, j, k;
	u8 *p;

	netdev_stats = dev_get_stats(netdev, &temp);

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) {
		switch (nfp_net_et_stats[i].type) {
		case NETDEV_ET_STATS:
			p = (char *)netdev_stats + nfp_net_et_stats[i].off;
			data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
				*(u64 *)p : *(u32 *)p;
			break;

		case NFP_NET_DEV_ET_STATS:
			io_p = nn->ctrl_bar + nfp_net_et_stats[i].off;
			data[i] = readq(io_p);
			break;
		}
	}
	for (j = 0; j < nn->num_r_vecs; j++) {
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync);
			data[i++] = nn->r_vecs[j].rx_pkts;
			tmp[0] = nn->r_vecs[j].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));

		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync);
			data[i++] = nn->r_vecs[j].tx_pkts;
			data[i++] = nn->r_vecs[j].tx_busy;
			tmp[3] = nn->r_vecs[j].hw_csum_tx;
			tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
			tmp[5] = nn->r_vecs[j].tx_gather;
			tmp[6] = nn->r_vecs[j].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));

		for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
			gathered_stats[k] += tmp[k];
	}
	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
		data[i++] = gathered_stats[j];
	for (j = 0; j < nn->num_tx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
	for (j = 0; j < nn->num_rx_rings; j++) {
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
		data[i++] = readq(io_p);
		io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
		data[i++] = readq(io_p);
	}
}

static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return NN_ET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/* RX network flow classification (RSS, filters, etc)
 */
static u32 ethtool_flow_to_nfp_flag(u32 flow_type)
{
	static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = {
		[TCP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_TCP,
		[TCP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_TCP,
		[UDP_V4_FLOW]	= NFP_NET_CFG_RSS_IPV4_UDP,
		[UDP_V6_FLOW]	= NFP_NET_CFG_RSS_IPV6_UDP,
		[IPV4_FLOW]	= NFP_NET_CFG_RSS_IPV4,
		[IPV6_FLOW]	= NFP_NET_CFG_RSS_IPV6,
	};

	if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp))
		return 0;

	return xlate_ethtool_to_nfp[flow_type];
}
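
/* Report which header fields feed the RSS hash for a given flow type;
 * user space queries this with e.g. `ethtool -n <ifname> rx-flow-hash tcp4`.
 */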
static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
				     struct ethtool_rxnfc *cmd)
{
	u32 nfp_rss_flag;

	cmd->data = 0;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(cmd->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	cmd->data |= RXH_IP_SRC | RXH_IP_DST;
	if (nn->rss_cfg & nfp_rss_flag)
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;

	return 0;
}

static int nfp_net_get_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = nn->num_rx_rings;
		return 0;
	case ETHTOOL_GRXFH:
		return nfp_net_get_rss_hash_opts(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
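
/* Update the RSS flow hash configuration; e.g.
 * `ethtool -N <ifname> rx-flow-hash tcp4 sdfn` enables hashing on the
 * IPv4 source/destination addresses and TCP ports for TCP/IPv4 flows.
 */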
static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
				    struct ethtool_rxnfc *nfc)
{
	u32 new_rss_cfg = nn->rss_cfg;
	u32 nfp_rss_flag;
	int err;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	/* RSS only supports IP SA/DA and L4 src/dst ports */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	/* We need at least the IP SA/DA fields for hashing */
	if (!(nfc->data & RXH_IP_SRC) ||
	    !(nfc->data & RXH_IP_DST))
		return -EINVAL;

	nfp_rss_flag = ethtool_flow_to_nfp_flag(nfc->flow_type);
	if (!nfp_rss_flag)
		return -EINVAL;

	switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
	case 0:
		new_rss_cfg &= ~nfp_rss_flag;
		break;
	case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
		new_rss_cfg |= nfp_rss_flag;
		break;
	default:
		return -EINVAL;
	}

	new_rss_cfg |= NFP_NET_CFG_RSS_TOEPLITZ;
	new_rss_cfg |= NFP_NET_CFG_RSS_MASK;

	if (new_rss_cfg == nn->rss_cfg)
		return 0;

	writel(new_rss_cfg, nn->ctrl_bar + NFP_NET_CFG_RSS_CTRL);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
	if (err)
		return err;

	nn->rss_cfg = new_rss_cfg;

	nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg);
	return 0;
}

static int nfp_net_set_rxnfc(struct net_device *netdev,
			     struct ethtool_rxnfc *cmd)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		return nfp_net_set_rss_hash_opt(nn, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static u32 nfp_net_get_rxfh_indir_size(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return 0;

	return ARRAY_SIZE(nn->rss_itbl);
}

static u32 nfp_net_get_rxfh_key_size(struct net_device *netdev)
{
	return NFP_NET_CFG_RSS_KEY_SZ;
}
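
/* Report the RSS indirection table, hash key and hash function; this backs
 * `ethtool -x <ifname>`. Any of the output pointers may be NULL when the
 * caller is not interested in that item.
 */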
static int nfp_net_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS))
		return -EOPNOTSUPP;

	if (indir)
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			indir[i] = nn->rss_itbl[i];
	if (key)
		memcpy(key, nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

static int nfp_net_set_rxfh(struct net_device *netdev,
			    const u32 *indir, const u8 *key,
			    const u8 hfunc)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	if (!(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
	    !(hfunc == ETH_RSS_HASH_NO_CHANGE || hfunc == ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!key && !indir)
		return 0;

	if (key) {
		memcpy(nn->rss_key, key, NFP_NET_CFG_RSS_KEY_SZ);
		nfp_net_rss_write_key(nn);
	}
	if (indir) {
		for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
			nn->rss_itbl[i] = indir[i];

		nfp_net_rss_write_itbl(nn);
	}

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS);
}

/* Dump BAR registers
 */
static int nfp_net_get_regs_len(struct net_device *netdev)
{
	return NFP_NET_CFG_BAR_SZ;
}
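
/* Copy the whole control BAR out as the register dump; user space can fetch
 * it with `ethtool -d <ifname>`.
 */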
static void nfp_net_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs, void *p)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 *regs_buf = p;
	int i;

	regs->version = nn_readl(nn, NFP_NET_CFG_VERSION);

	for (i = 0; i < NFP_NET_CFG_BAR_SZ / sizeof(u32); i++)
		regs_buf[i] = readl(nn->ctrl_bar + (i * sizeof(u32)));
}

static int nfp_net_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
		return -EINVAL;

	ec->rx_coalesce_usecs = nn->rx_coalesce_usecs;
	ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames;
	ec->tx_coalesce_usecs = nn->tx_coalesce_usecs;
	ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames;

	return 0;
}

static int nfp_net_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int factor;

	if (ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_rx_coalesce ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_coalesce_usecs_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_coalesce_usecs_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -EINVAL;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->me_freq_mhz / 16;
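
	/* Illustrative numbers only: with a hypothetical 1200 MHz ME clock,
	 * factor = 1200 / 16 = 75 ticks per usec, so the 16-bit limits checked
	 * below would cap the '_usecs' settings at roughly 873 usecs.
	 */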

	/* Each pair of (usecs, max_frames) fields specifies that interrupts
	 * should be coalesced until
	 *      (usecs > 0 && time_since_first_completion >= usecs) ||
	 *      (max_frames > 0 && completed_frames >= max_frames)
	 *
	 * It is illegal to set both usecs and max_frames to zero as this would
	 * cause interrupts to never be generated.  To disable coalescing, set
	 * usecs = 0 and max_frames = 1.
	 *
	 * Some implementations ignore the value of max_frames and use the
	 * condition time_since_first_completion >= usecs
	 */
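
	/* For example, coalescing can be switched off from user space with
	 * something like `ethtool -C <ifname> rx-usecs 0 rx-frames 1
	 * tx-usecs 0 tx-frames 1`.
	 */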
573 if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD))
576 /* ensure valid configuration */
577 if (!ec->rx_coalesce_usecs && !ec->rx_max_coalesced_frames)
580 if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames)
583 if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1))
586 if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1))
589 if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1))
592 if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1))
595 /* configuration is valid */
596 nn->rx_coalesce_usecs = ec->rx_coalesce_usecs;
597 nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames;
598 nn->tx_coalesce_usecs = ec->tx_coalesce_usecs;
599 nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames;
601 /* write configuration to device */
602 nfp_net_coalesce_write_cfg(nn);
603 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);

static const struct ethtool_ops nfp_net_ethtool_ops = {
	.get_drvinfo = nfp_net_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = nfp_net_get_ringparam,
	.set_ringparam = nfp_net_set_ringparam,
	.get_strings = nfp_net_get_strings,
	.get_ethtool_stats = nfp_net_get_stats,
	.get_sset_count = nfp_net_get_sset_count,
	.get_rxnfc = nfp_net_get_rxnfc,
	.set_rxnfc = nfp_net_set_rxnfc,
	.get_rxfh_indir_size = nfp_net_get_rxfh_indir_size,
	.get_rxfh_key_size = nfp_net_get_rxfh_key_size,
	.get_rxfh = nfp_net_get_rxfh,
	.set_rxfh = nfp_net_set_rxfh,
	.get_regs_len = nfp_net_get_regs_len,
	.get_regs = nfp_net_get_regs,
	.get_coalesce = nfp_net_get_coalesce,
	.set_coalesce = nfp_net_set_coalesce,
};

void nfp_net_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nfp_net_ethtool_ops;
}