drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/netdevice.h>
19 #include <linux/net_tstamp.h>
20 #include <linux/pci.h>
21 #include "liquidio_common.h"
22 #include "octeon_droq.h"
23 #include "octeon_iq.h"
24 #include "response_manager.h"
25 #include "octeon_device.h"
26 #include "octeon_nic.h"
27 #include "octeon_main.h"
28 #include "octeon_network.h"
29 #include "cn66xx_regs.h"
30 #include "cn66xx_device.h"
31 #include "cn23xx_pf_device.h"
32 #include "cn23xx_vf_device.h"
33
34 static int octnet_get_link_stats(struct net_device *netdev);
35
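/* Completion context for interrupt-moderation soft commands: the requester
 * sleeps on 'wc' until octnet_intrmod_callback() sets 'cond' and 'status'.
 */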
36 struct oct_intrmod_context {
37         int octeon_id;
38         wait_queue_head_t wc;
39         int cond;
40         int status;
41 };
42
43 struct oct_intrmod_resp {
44         u64     rh;
45         struct oct_intrmod_cfg intrmod;
46         u64     status;
47 };
48
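/* Completion context and response layout for MDIO soft commands issued by
 * octnet_mdio45_access().
 */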
49 struct oct_mdio_cmd_context {
50         int octeon_id;
51         wait_queue_head_t wc;
52         int cond;
53 };
54
55 struct oct_mdio_cmd_resp {
56         u64 rh;
57         struct oct_mdio_cmd resp;
58         u64 status;
59 };
60
61 #define OCT_MDIO45_RESP_SIZE   (sizeof(struct oct_mdio_cmd_resp))
62
63 /* Octeon's interface mode of operation */
64 enum {
65         INTERFACE_MODE_DISABLED,
66         INTERFACE_MODE_RGMII,
67         INTERFACE_MODE_GMII,
68         INTERFACE_MODE_SPI,
69         INTERFACE_MODE_PCIE,
70         INTERFACE_MODE_XAUI,
71         INTERFACE_MODE_SGMII,
72         INTERFACE_MODE_PICMG,
73         INTERFACE_MODE_NPI,
74         INTERFACE_MODE_LOOP,
75         INTERFACE_MODE_SRIO,
76         INTERFACE_MODE_ILK,
77         INTERFACE_MODE_RXAUI,
78         INTERFACE_MODE_QSGMII,
79         INTERFACE_MODE_AGL,
80         INTERFACE_MODE_XLAUI,
81         INTERFACE_MODE_XFI,
82         INTERFACE_MODE_10G_KR,
83         INTERFACE_MODE_40G_KR4,
84         INTERFACE_MODE_MIXED,
85 };
86
87 #define OCT_ETHTOOL_REGDUMP_LEN  4096
88 #define OCT_ETHTOOL_REGDUMP_LEN_23XX  (4096 * 11)
89 #define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF  (4096 * 2)
90 #define OCT_ETHTOOL_REGSVER  1
91
92 /* statistics of PF */
93 static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
94         "rx_packets",
95         "tx_packets",
96         "rx_bytes",
97         "tx_bytes",
98         "rx_errors",    /*jabber_err+l2_err+frame_err */
99         "tx_errors",    /*fw_err_pko+fw_err_link+fw_err_drop */
100         "rx_dropped",   /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd +
101                          *st->fromwire.dmac_drop + st->fromwire.fw_err_drop
102                          */
103         "tx_dropped",
104
105         "tx_total_sent",
106         "tx_total_fwd",
107         "tx_err_pko",
108         "tx_err_pki",
109         "tx_err_link",
110         "tx_err_drop",
111
112         "tx_tso",
113         "tx_tso_packets",
114         "tx_tso_err",
115         "tx_vxlan",
116
117         "mac_tx_total_pkts",
118         "mac_tx_total_bytes",
119         "mac_tx_mcast_pkts",
120         "mac_tx_bcast_pkts",
121         "mac_tx_ctl_packets",   /*oct->link_stats.fromhost.ctl_sent */
122         "mac_tx_total_collisions",
123         "mac_tx_one_collision",
124         "mac_tx_multi_collison",
125         "mac_tx_max_collision_fail",
126         "mac_tx_max_deferal_fail",
127         "mac_tx_fifo_err",
128         "mac_tx_runts",
129
130         "rx_total_rcvd",
131         "rx_total_fwd",
132         "rx_jabber_err",
133         "rx_l2_err",
134         "rx_frame_err",
135         "rx_err_pko",
136         "rx_err_link",
137         "rx_err_drop",
138
139         "rx_vxlan",
140         "rx_vxlan_err",
141
142         "rx_lro_pkts",
143         "rx_lro_bytes",
144         "rx_total_lro",
145
146         "rx_lro_aborts",
147         "rx_lro_aborts_port",
148         "rx_lro_aborts_seq",
149         "rx_lro_aborts_tsval",
150         "rx_lro_aborts_timer",
151         "rx_fwd_rate",
152
153         "mac_rx_total_rcvd",
154         "mac_rx_bytes",
155         "mac_rx_total_bcst",
156         "mac_rx_total_mcst",
157         "mac_rx_runts",
158         "mac_rx_ctl_packets",
159         "mac_rx_fifo_err",
160         "mac_rx_dma_drop",
161         "mac_rx_fcs_err",
162
163         "link_state_changes",
164 };
165
166 /* statistics of VF */
167 static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
168         "rx_packets",
169         "tx_packets",
170         "rx_bytes",
171         "tx_bytes",
172         "rx_errors", /* jabber_err + l2_err+frame_err */
173         "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */
174         "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */
175         "tx_dropped",
176         "link_state_changes",
177 };
178
179 /* statistics of host tx queue */
180 static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
181         "packets",              /*oct->instr_queue[iq_no]->stats.tx_done*/
182         "bytes",                /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
183         "dropped",
184         "iq_busy",
185         "sgentry_sent",
186
187         "fw_instr_posted",
188         "fw_instr_processed",
189         "fw_instr_dropped",
190         "fw_bytes_sent",
191
192         "tso",
193         "vxlan",
194         "txq_restart",
195 };
196
197 /* statistics of host rx queue */
198 static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
199         "packets",              /*oct->droq[oq_no]->stats.rx_pkts_received */
200         "bytes",                /*oct->droq[oq_no]->stats.rx_bytes_received */
201         "dropped",              /*oct->droq[oq_no]->stats.rx_dropped+
202                                  *oct->droq[oq_no]->stats.dropped_nodispatch+
203                                  *oct->droq[oq_no]->stats.dropped_toomany+
204                                  *oct->droq[oq_no]->stats.dropped_nomem
205                                  */
206         "dropped_nomem",
207         "dropped_toomany",
208         "fw_dropped",
209         "fw_pkts_received",
210         "fw_bytes_received",
211         "fw_dropped_nodispatch",
212
213         "vxlan",
214         "buffer_alloc_failure",
215 };
216
217 /* LiquidIO driver private flags */
218 static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
219 };
220
221 #define OCTNIC_NCMD_AUTONEG_ON  0x1
222 #define OCTNIC_NCMD_PHY_ON      0x2
223
224 static int lio_get_link_ksettings(struct net_device *netdev,
225                                   struct ethtool_link_ksettings *ecmd)
226 {
227         struct lio *lio = GET_LIO(netdev);
228         struct octeon_device *oct = lio->oct_dev;
229         struct oct_link_info *linfo;
230         u32 supported = 0, advertising = 0;
231
232         linfo = &lio->linfo;
233
234         if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
235             linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
236             linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
237             linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
238                 ecmd->base.port = PORT_FIBRE;
239
240                 if (linfo->link.s.speed == SPEED_10000) {
241                         supported = SUPPORTED_10000baseT_Full;
242                         advertising = ADVERTISED_10000baseT_Full;
243                 }
244
245                 supported |= SUPPORTED_FIBRE | SUPPORTED_Pause;
246                 advertising |= ADVERTISED_Pause;
247                 ethtool_convert_legacy_u32_to_link_mode(
248                         ecmd->link_modes.supported, supported);
249                 ethtool_convert_legacy_u32_to_link_mode(
250                         ecmd->link_modes.advertising, advertising);
251                 ecmd->base.autoneg = AUTONEG_DISABLE;
252
253         } else {
254                 dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
255                         linfo->link.s.if_mode);
256         }
257
258         if (linfo->link.s.link_up) {
259                 ecmd->base.speed = linfo->link.s.speed;
260                 ecmd->base.duplex = linfo->link.s.duplex;
261         } else {
262                 ecmd->base.speed = SPEED_UNKNOWN;
263                 ecmd->base.duplex = DUPLEX_UNKNOWN;
264         }
265
266         return 0;
267 }
268
269 static void
270 lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
271 {
272         struct lio *lio;
273         struct octeon_device *oct;
274
275         lio = GET_LIO(netdev);
276         oct = lio->oct_dev;
277
278         memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
279         strcpy(drvinfo->driver, "liquidio");
280         strcpy(drvinfo->version, LIQUIDIO_VERSION);
281         strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
282                 ETHTOOL_FWVERS_LEN);
283         strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
284 }
285
286 static void
287 lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
288 {
289         struct octeon_device *oct;
290         struct lio *lio;
291
292         lio = GET_LIO(netdev);
293         oct = lio->oct_dev;
294
295         memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
296         strcpy(drvinfo->driver, "liquidio_vf");
297         strcpy(drvinfo->version, LIQUIDIO_VERSION);
298         strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
299                 ETHTOOL_FWVERS_LEN);
300         strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
301 }
302
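/* ethtool -l: report the maximum and currently configured queue counts.
 * The limits come from the static config on CN6XXX and from the SR-IOV
 * ring allocation on CN23XX PF/VF.
 */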
303 static void
304 lio_ethtool_get_channels(struct net_device *dev,
305                          struct ethtool_channels *channel)
306 {
307         struct lio *lio = GET_LIO(dev);
308         struct octeon_device *oct = lio->oct_dev;
309         u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
310
311         if (OCTEON_CN6XXX(oct)) {
312                 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
313
314                 max_rx = CFG_GET_OQ_MAX_Q(conf6x);
315                 max_tx = CFG_GET_IQ_MAX_Q(conf6x);
316                 rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
317                 tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
318         } else if (OCTEON_CN23XX_PF(oct)) {
319
320                 max_rx = oct->sriov_info.num_pf_rings;
321                 max_tx = oct->sriov_info.num_pf_rings;
322                 rx_count = lio->linfo.num_rxpciq;
323                 tx_count = lio->linfo.num_txpciq;
324         } else if (OCTEON_CN23XX_VF(oct)) {
325                 max_tx = oct->sriov_info.rings_per_vf;
326                 max_rx = oct->sriov_info.rings_per_vf;
327                 rx_count = lio->linfo.num_rxpciq;
328                 tx_count = lio->linfo.num_txpciq;
329         }
330
331         channel->max_rx = max_rx;
332         channel->max_tx = max_tx;
333         channel->rx_count = rx_count;
334         channel->tx_count = tx_count;
335 }
336
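/* The "EEPROM" exposed through ethtool is a text blob carrying the board
 * name, serial number, and major/minor revision.
 */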
337 static int lio_get_eeprom_len(struct net_device *netdev)
338 {
339         u8 buf[192];
340         struct lio *lio = GET_LIO(netdev);
341         struct octeon_device *oct_dev = lio->oct_dev;
342         struct octeon_board_info *board_info;
343         int len;
344
345         board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
346         len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
347                       board_info->name, board_info->serial_number,
348                       board_info->major, board_info->minor);
349
350         return len;
351 }
352
353 static int
354 lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
355                u8 *bytes)
356 {
357         struct lio *lio = GET_LIO(netdev);
358         struct octeon_device *oct_dev = lio->oct_dev;
359         struct octeon_board_info *board_info;
360
361         if (eeprom->offset)
362                 return -EINVAL;
363
364         eeprom->magic = oct_dev->pci_dev->vendor;
365         board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
366         sprintf((char *)bytes,
367                 "boardname:%s serialnum:%s maj:%lld min:%lld\n",
368                 board_info->name, board_info->serial_number,
369                 board_info->major, board_info->minor);
370
371         return 0;
372 }
373
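/* Send an OCTNET_CMD_GPIO_ACCESS control command so the firmware writes
 * 'val' to GPIO register 'addr' (used below to drive the PHY LED GPIO on
 * CN66XX).
 */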
374 static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
375 {
376         struct lio *lio = GET_LIO(netdev);
377         struct octeon_device *oct = lio->oct_dev;
378         struct octnic_ctrl_pkt nctrl;
379         int ret = 0;
380
381         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
382
383         nctrl.ncmd.u64 = 0;
384         nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
385         nctrl.ncmd.s.param1 = addr;
386         nctrl.ncmd.s.param2 = val;
387         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
388         nctrl.wait_time = 100;
389         nctrl.netpndev = (u64)netdev;
390         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
391
392         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
393         if (ret < 0) {
394                 dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
395                 return -EINVAL;
396         }
397
398         return 0;
399 }
400
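/* Send an OCTNET_CMD_ID_ACTIVE control command to turn the port
 * identification LED on or off (used for CN23XX PF).
 */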
401 static int octnet_id_active(struct net_device *netdev, int val)
402 {
403         struct lio *lio = GET_LIO(netdev);
404         struct octeon_device *oct = lio->oct_dev;
405         struct octnic_ctrl_pkt nctrl;
406         int ret = 0;
407
408         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
409
410         nctrl.ncmd.u64 = 0;
411         nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
412         nctrl.ncmd.s.param1 = val;
413         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
414         nctrl.wait_time = 100;
415         nctrl.netpndev = (u64)netdev;
416         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
417
418         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
419         if (ret < 0) {
420                 dev_err(&oct->pci_dev->dev, "Failed to configure ID LED\n");
421                 return -EINVAL;
422         }
423
424         return 0;
425 }
426
427 /* Callback invoked when the MDIO command response arrives.
428  */
429 static void octnet_mdio_resp_callback(struct octeon_device *oct,
430                                       u32 status,
431                                       void *buf)
432 {
433         struct oct_mdio_cmd_context *mdio_cmd_ctx;
434         struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
435
436         mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
437
438         oct = lio_get_device(mdio_cmd_ctx->octeon_id);
439         if (status) {
440                 dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
441                         CVM_CAST64(status));
442                 WRITE_ONCE(mdio_cmd_ctx->cond, -1);
443         } else {
444                 WRITE_ONCE(mdio_cmd_ctx->cond, 1);
445         }
446         wake_up_interruptible(&mdio_cmd_ctx->wc);
447 }
448
449 /* PHY access over MDIO clause 45: op == 0 reads register 'loc' into
450  * *value; a non-zero op writes *value to that register.
451  */
452 static int
453 octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
454 {
455         struct octeon_device *oct_dev = lio->oct_dev;
456         struct octeon_soft_command *sc;
457         struct oct_mdio_cmd_resp *mdio_cmd_rsp;
458         struct oct_mdio_cmd_context *mdio_cmd_ctx;
459         struct oct_mdio_cmd *mdio_cmd;
460         int retval = 0;
461
462         sc = (struct octeon_soft_command *)
463                 octeon_alloc_soft_command(oct_dev,
464                                           sizeof(struct oct_mdio_cmd),
465                                           sizeof(struct oct_mdio_cmd_resp),
466                                           sizeof(struct oct_mdio_cmd_context));
467
468         if (!sc)
469                 return -ENOMEM;
470
471         mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
472         mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
473         mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
474
475         WRITE_ONCE(mdio_cmd_ctx->cond, 0);
476         mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
477         mdio_cmd->op = op;
478         mdio_cmd->mdio_addr = loc;
479         if (op)
480                 mdio_cmd->value1 = *value;
481         octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
482
483         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
484
485         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
486                                     0, 0, 0);
487
488         sc->wait_time = 1000;
489         sc->callback = octnet_mdio_resp_callback;
490         sc->callback_arg = sc;
491
492         init_waitqueue_head(&mdio_cmd_ctx->wc);
493
494         retval = octeon_send_soft_command(oct_dev, sc);
495
496         if (retval == IQ_SEND_FAILED) {
497                 dev_err(&oct_dev->pci_dev->dev,
498                         "octnet_mdio45_access instruction failed status: %x\n",
499                         retval);
500                 retval = -EBUSY;
501         } else {
502                 /* Sleep on a wait queue till the cond flag indicates that the
503                  * response arrived
504                  */
505                 sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
506                 retval = mdio_cmd_rsp->status;
507                 if (retval) {
508                         dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
509                         retval = -EBUSY;
510                 } else {
511                         octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
512                                             sizeof(struct oct_mdio_cmd) / 8);
513
514                         if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
515                                 if (!op)
516                                         *value = mdio_cmd_rsp->resp.value1;
517                         } else {
518                                 retval = -EINVAL;
519                         }
520                 }
521         }
522
523         octeon_free_soft_command(oct_dev, sc);
524
525         return retval;
526 }
527
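/* ethtool set_phys_id handler: identify the port by LED.  CN66XX toggles a
 * PHY GPIO, CN68XX programs the LED beacon registers over MDIO, and
 * CN23XX PF asks the firmware to blink the LED.
 */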
528 static int lio_set_phys_id(struct net_device *netdev,
529                            enum ethtool_phys_id_state state)
530 {
531         struct lio *lio = GET_LIO(netdev);
532         struct octeon_device *oct = lio->oct_dev;
533         int value, ret;
534
535         switch (state) {
536         case ETHTOOL_ID_ACTIVE:
537                 if (oct->chip_id == OCTEON_CN66XX) {
538                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
539                                            VITESSE_PHY_GPIO_DRIVEON);
540                         return 2;
541
542                 } else if (oct->chip_id == OCTEON_CN68XX) {
543                         /* Save the current LED settings */
544                         ret = octnet_mdio45_access(lio, 0,
545                                                    LIO68XX_LED_BEACON_ADDR,
546                                                    &lio->phy_beacon_val);
547                         if (ret)
548                                 return ret;
549
550                         ret = octnet_mdio45_access(lio, 0,
551                                                    LIO68XX_LED_CTRL_ADDR,
552                                                    &lio->led_ctrl_val);
553                         if (ret)
554                                 return ret;
555
556                         /* Configure Beacon values */
557                         value = LIO68XX_LED_BEACON_CFGON;
558                         ret = octnet_mdio45_access(lio, 1,
559                                                    LIO68XX_LED_BEACON_ADDR,
560                                                    &value);
561                         if (ret)
562                                 return ret;
563
564                         value = LIO68XX_LED_CTRL_CFGON;
565                         ret = octnet_mdio45_access(lio, 1,
566                                                    LIO68XX_LED_CTRL_ADDR,
567                                                    &value);
568                         if (ret)
569                                 return ret;
570                 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
571                         octnet_id_active(netdev, LED_IDENTIFICATION_ON);
572
573                         /* returns 0 since updates are asynchronous */
574                         return 0;
575                 } else {
576                         return -EINVAL;
577                 }
578                 break;
579
580         case ETHTOOL_ID_ON:
581                 if (oct->chip_id == OCTEON_CN66XX) {
582                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
583                                            VITESSE_PHY_GPIO_HIGH);
584
585                 } else if (oct->chip_id == OCTEON_CN68XX) {
586                         return -EINVAL;
587                 } else {
588                         return -EINVAL;
589                 }
590                 break;
591
592         case ETHTOOL_ID_OFF:
593                 if (oct->chip_id == OCTEON_CN66XX)
594                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
595                                            VITESSE_PHY_GPIO_LOW);
596                 else if (oct->chip_id == OCTEON_CN68XX)
597                         return -EINVAL;
598                 else
599                         return -EINVAL;
600
601                 break;
602
603         case ETHTOOL_ID_INACTIVE:
604                 if (oct->chip_id == OCTEON_CN66XX) {
605                         octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
606                                            VITESSE_PHY_GPIO_DRIVEOFF);
607                 } else if (oct->chip_id == OCTEON_CN68XX) {
608                         /* Restore LED settings */
609                         ret = octnet_mdio45_access(lio, 1,
610                                                    LIO68XX_LED_CTRL_ADDR,
611                                                    &lio->led_ctrl_val);
612                         if (ret)
613                                 return ret;
614
615                         ret = octnet_mdio45_access(lio, 1,
616                                                    LIO68XX_LED_BEACON_ADDR,
617                                                    &lio->phy_beacon_val);
618                         if (ret)
619                                 return ret;
620                 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
621                         octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
622
623                         return 0;
624                 } else {
625                         return -EINVAL;
626                 }
627                 break;
628
629         default:
630                 return -EINVAL;
631         }
632
633         return 0;
634 }
635
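/* ethtool -g: report the maximum and currently configured descriptor
 * counts for the tx and rx rings.
 */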
636 static void
637 lio_ethtool_get_ringparam(struct net_device *netdev,
638                           struct ethtool_ringparam *ering)
639 {
640         struct lio *lio = GET_LIO(netdev);
641         struct octeon_device *oct = lio->oct_dev;
642         u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
643             rx_pending = 0;
644
645         if (OCTEON_CN6XXX(oct)) {
646                 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
647
648                 tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
649                 rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
650                 rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
651                 tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
652         } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
653                 tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
654                 rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
655                 rx_pending = oct->droq[0]->max_count;
656                 tx_pending = oct->instr_queue[0]->max_count;
657         }
658
659         ering->tx_pending = tx_pending;
660         ering->tx_max_pending = tx_max_pending;
661         ering->rx_pending = rx_pending;
662         ering->rx_max_pending = rx_max_pending;
663         ering->rx_mini_pending = 0;
664         ering->rx_jumbo_pending = 0;
665         ering->rx_mini_max_pending = 0;
666         ering->rx_jumbo_max_pending = 0;
667 }
668
669 static u32 lio_get_msglevel(struct net_device *netdev)
670 {
671         struct lio *lio = GET_LIO(netdev);
672
673         return lio->msg_enable;
674 }
675
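/* Changing NETIF_MSG_HW in the message level also toggles verbose logging
 * in the firmware via OCTNET_CMD_VERBOSE_ENABLE/DISABLE.
 */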
676 static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
677 {
678         struct lio *lio = GET_LIO(netdev);
679
680         if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
681                 if (msglvl & NETIF_MSG_HW)
682                         liquidio_set_feature(netdev,
683                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
684                 else
685                         liquidio_set_feature(netdev,
686                                              OCTNET_CMD_VERBOSE_DISABLE, 0);
687         }
688
689         lio->msg_enable = msglvl;
690 }
691
692 static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
693 {
694         struct lio *lio = GET_LIO(netdev);
695
696         lio->msg_enable = msglvl;
697 }
698
699 static void
700 lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
701 {
702         /* Notes: Not supporting any auto negotiation in these
703          * drivers. Just report pause frame support.
704          */
705         struct lio *lio = GET_LIO(netdev);
706         struct octeon_device *oct = lio->oct_dev;
707
708         pause->autoneg = 0;
709
710         pause->tx_pause = oct->tx_pause;
711         pause->rx_pause = oct->rx_pause;
712 }
713
714 static int
715 lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
716 {
717         /* Notes: Not supporting any auto negotiation in these
718          * drivers.
719          */
720         struct lio *lio = GET_LIO(netdev);
721         struct octeon_device *oct = lio->oct_dev;
722         struct octnic_ctrl_pkt nctrl;
723         struct oct_link_info *linfo = &lio->linfo;
724
725         int ret = 0;
726
727         if (oct->chip_id != OCTEON_CN23XX_PF_VID)
728                 return -EINVAL;
729
730         if (linfo->link.s.duplex == 0) {
731                 /*no flow control for half duplex*/
732                 if (pause->rx_pause || pause->tx_pause)
733                         return -EINVAL;
734         }
735
736         /*do not support autoneg of link flow control*/
737         if (pause->autoneg == AUTONEG_ENABLE)
738                 return -EINVAL;
739
740         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
741
742         nctrl.ncmd.u64 = 0;
743         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
744         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
745         nctrl.wait_time = 100;
746         nctrl.netpndev = (u64)netdev;
747         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
748
749         if (pause->rx_pause) {
750                 /*enable rx pause*/
751                 nctrl.ncmd.s.param1 = 1;
752         } else {
753                 /*disable rx pause*/
754                 nctrl.ncmd.s.param1 = 0;
755         }
756
757         if (pause->tx_pause) {
758                 /*enable tx pause*/
759                 nctrl.ncmd.s.param2 = 1;
760         } else {
761                 /*disable tx pause*/
762                 nctrl.ncmd.s.param2 = 0;
763         }
764
765         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
766         if (ret < 0) {
767                 dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n");
768                 return -EINVAL;
769         }
770
771         oct->rx_pause = pause->rx_pause;
772         oct->tx_pause = pause->tx_pause;
773
774         return 0;
775 }
776
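/* ethtool -S for the PF: values are written in exactly the order declared
 * in oct_stats_strings, followed by per-IQ and per-DROQ counters.
 */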
777 static void
778 lio_get_ethtool_stats(struct net_device *netdev,
779                       struct ethtool_stats *stats  __attribute__((unused)),
780                       u64 *data)
781 {
782         struct lio *lio = GET_LIO(netdev);
783         struct octeon_device *oct_dev = lio->oct_dev;
784         struct net_device_stats *netstats = &netdev->stats;
785         int i = 0, j;
786
787         netdev->netdev_ops->ndo_get_stats(netdev);
788         octnet_get_link_stats(netdev);
789
790         /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
791         data[i++] = CVM_CAST64(netstats->rx_packets);
792         /*sum of oct->instr_queue[iq_no]->stats.tx_done */
793         data[i++] = CVM_CAST64(netstats->tx_packets);
794         /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
795         data[i++] = CVM_CAST64(netstats->rx_bytes);
796         /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
797         data[i++] = CVM_CAST64(netstats->tx_bytes);
798         data[i++] = CVM_CAST64(netstats->rx_errors);
799         data[i++] = CVM_CAST64(netstats->tx_errors);
800         /*sum of oct->droq[oq_no]->stats->rx_dropped +
801          *oct->droq[oq_no]->stats->dropped_nodispatch +
802          *oct->droq[oq_no]->stats->dropped_toomany +
803          *oct->droq[oq_no]->stats->dropped_nomem
804          */
805         data[i++] = CVM_CAST64(netstats->rx_dropped);
806         /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
807         data[i++] = CVM_CAST64(netstats->tx_dropped);
808
809         /* firmware tx stats */
810         /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
811          *fromhost.fw_total_sent
812          */
813         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
814         /*per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
815         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
816         /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
817         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
818         /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
819         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
820         /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
821         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
822         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
823          *fw_err_drop
824          */
825         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
826
827         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
828         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
829         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
830          *fw_tso_fwd
831          */
832         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
833         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
834          *fw_err_tso
835          */
836         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
837         /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
838          *fw_tx_vxlan
839          */
840         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
841
842         /* mac tx statistics */
843         /*CVMX_BGXX_CMRX_TX_STAT5 */
844         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
845         /*CVMX_BGXX_CMRX_TX_STAT4 */
846         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
847         /*CVMX_BGXX_CMRX_TX_STAT15 */
848         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
849         /*CVMX_BGXX_CMRX_TX_STAT14 */
850         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
851         /*CVMX_BGXX_CMRX_TX_STAT17 */
852         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
853         /*CVMX_BGXX_CMRX_TX_STAT0 */
854         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
855         /*CVMX_BGXX_CMRX_TX_STAT3 */
856         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
857         /*CVMX_BGXX_CMRX_TX_STAT2 */
858         data[i++] =
859                 CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
860         /*CVMX_BGXX_CMRX_TX_STAT0 */
861         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
862         /*CVMX_BGXX_CMRX_TX_STAT1 */
863         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
864         /*CVMX_BGXX_CMRX_TX_STAT16 */
865         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
866         /*CVMX_BGXX_CMRX_TX_STAT6 */
867         data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
868
869         /* RX firmware stats */
870         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
871          *fw_total_rcvd
872          */
873         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
874         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
875          *fw_total_fwd
876          */
877         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
878         /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
879         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
880         /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
881         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
882         /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
883         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
884         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
885          *fw_err_pko
886          */
887         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
888         /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
889         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
890         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
891          *fromwire.fw_err_drop
892          */
893         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
894
895         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
896          *fromwire.fw_rx_vxlan
897          */
898         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
899         /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
900          *fromwire.fw_rx_vxlan_err
901          */
902         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
903
904         /* LRO */
905         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
906          *fw_lro_pkts
907          */
908         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
909         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
910          *fw_lro_octs
911          */
912         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
913         /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
914         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
915         /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
916         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
917         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
918          *fw_lro_aborts_port
919          */
920         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
921         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
922          *fw_lro_aborts_seq
923          */
924         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
925         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
926          *fw_lro_aborts_tsval
927          */
928         data[i++] =
929                 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
930         /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
931          *fw_lro_aborts_timer
932          */
933         data[i++] =
934                 CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
935         /* intrmod: packet forward rate */
936         /*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
937         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
938
939         /* mac: link-level stats */
940         /*CVMX_BGXX_CMRX_RX_STAT0 */
941         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
942         /*CVMX_BGXX_CMRX_RX_STAT1 */
943         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
944         /*CVMX_PKI_STATX_STAT5 */
945         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
946         /*CVMX_PKI_STATX_STAT5 */
947         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
948         /*wqe->word2.err_code or wqe->word2.err_level */
949         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
950         /*CVMX_BGXX_CMRX_RX_STAT2 */
951         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
952         /*CVMX_BGXX_CMRX_RX_STAT6 */
953         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
954         /*CVMX_BGXX_CMRX_RX_STAT4 */
955         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
956         /*wqe->word2.err_code or wqe->word2.err_level */
957         data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
958         /*lio->link_changes*/
959         data[i++] = CVM_CAST64(lio->link_changes);
960
961         for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
962                 if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
963                         continue;
964                 /*packets to network port*/
965                 /*# of packets tx to network */
966                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
967                 /*# of bytes tx to network */
968                 data[i++] =
969                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
970                 /*# of packets dropped */
971                 data[i++] =
972                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
973                 /*# of tx fails due to queue full */
974                 data[i++] =
975                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
976                 /*XXX gather entries sent */
977                 data[i++] =
978                         CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
979
980                 /*instruction to firmware: data and control */
981                 /*# of instructions to the queue */
982                 data[i++] =
983                         CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
984                 /*# of instructions processed */
985                 data[i++] = CVM_CAST64(
986                                 oct_dev->instr_queue[j]->stats.instr_processed);
987                 /*# of instructions could not be processed */
988                 data[i++] = CVM_CAST64(
989                                 oct_dev->instr_queue[j]->stats.instr_dropped);
990                 /*bytes sent through the queue */
991                 data[i++] =
992                         CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
993
994                 /*tso request*/
995                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
996                 /*vxlan request*/
997                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
998                 /*txq restart*/
999                 data[i++] =
1000                         CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
1001         }
1002
1003         /* RX */
1004         for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
1005                 if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
1006                         continue;
1007
1008                 /*packets sent to TCP/IP network stack */
1009                 /*# of packets to network stack */
1010                 data[i++] =
1011                         CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
1012                 /*# of bytes to network stack */
1013                 data[i++] =
1014                         CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
1015                 /*# of packets dropped */
1016                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1017                                        oct_dev->droq[j]->stats.dropped_toomany +
1018                                        oct_dev->droq[j]->stats.rx_dropped);
1019                 data[i++] =
1020                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1021                 data[i++] =
1022                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1023                 data[i++] =
1024                         CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1025
1026                 /*control and data path*/
1027                 data[i++] =
1028                         CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1029                 data[i++] =
1030                         CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1031                 data[i++] =
1032                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1033
1034                 data[i++] =
1035                         CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1036                 data[i++] =
1037                         CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1038         }
1039 }
1040
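/* ethtool -S for the VF: same layout as the PF handler, but following
 * oct_vf_stats_strings and covering only this VF's queues.
 */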
1041 static void lio_vf_get_ethtool_stats(struct net_device *netdev,
1042                                      struct ethtool_stats *stats
1043                                      __attribute__((unused)),
1044                                      u64 *data)
1045 {
1046         struct net_device_stats *netstats = &netdev->stats;
1047         struct lio *lio = GET_LIO(netdev);
1048         struct octeon_device *oct_dev = lio->oct_dev;
1049         int i = 0, j, vj;
1050
1051         netdev->netdev_ops->ndo_get_stats(netdev);
1052         /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
1053         data[i++] = CVM_CAST64(netstats->rx_packets);
1054         /* sum of oct->instr_queue[iq_no]->stats.tx_done */
1055         data[i++] = CVM_CAST64(netstats->tx_packets);
1056         /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
1057         data[i++] = CVM_CAST64(netstats->rx_bytes);
1058         /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
1059         data[i++] = CVM_CAST64(netstats->tx_bytes);
1060         data[i++] = CVM_CAST64(netstats->rx_errors);
1061         data[i++] = CVM_CAST64(netstats->tx_errors);
1062          /* sum of oct->droq[oq_no]->stats->rx_dropped +
1063           * oct->droq[oq_no]->stats->dropped_nodispatch +
1064           * oct->droq[oq_no]->stats->dropped_toomany +
1065           * oct->droq[oq_no]->stats->dropped_nomem
1066           */
1067         data[i++] = CVM_CAST64(netstats->rx_dropped);
1068         /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
1069         data[i++] = CVM_CAST64(netstats->tx_dropped);
1070         /* lio->link_changes */
1071         data[i++] = CVM_CAST64(lio->link_changes);
1072
1073         for (vj = 0; vj < lio->linfo.num_txpciq; vj++) {
1074                 j = lio->linfo.txpciq[vj].s.q_no;
1075
1076                 /* packets to network port */
1077                 /* # of packets tx to network */
1078                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
1079                  /* # of bytes tx to network */
1080                 data[i++] = CVM_CAST64(
1081                                 oct_dev->instr_queue[j]->stats.tx_tot_bytes);
1082                 /* # of packets dropped */
1083                 data[i++] = CVM_CAST64(
1084                                 oct_dev->instr_queue[j]->stats.tx_dropped);
1085                 /* # of tx fails due to queue full */
1086                 data[i++] = CVM_CAST64(
1087                                 oct_dev->instr_queue[j]->stats.tx_iq_busy);
1088                 /* XXX gather entries sent */
1089                 data[i++] = CVM_CAST64(
1090                                 oct_dev->instr_queue[j]->stats.sgentry_sent);
1091
1092                 /* instruction to firmware: data and control */
1093                 /* # of instructions to the queue */
1094                 data[i++] = CVM_CAST64(
1095                                 oct_dev->instr_queue[j]->stats.instr_posted);
1096                 /* # of instructions processed */
1097                 data[i++] =
1098                     CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
1099                 /* # of instructions could not be processed */
1100                 data[i++] =
1101                     CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
1102                 /* bytes sent through the queue */
1103                 data[i++] = CVM_CAST64(
1104                                 oct_dev->instr_queue[j]->stats.bytes_sent);
1105                 /* tso request */
1106                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
1107                 /* vxlan request */
1108                 data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
1109                 /* txq restart */
1110                 data[i++] = CVM_CAST64(
1111                                 oct_dev->instr_queue[j]->stats.tx_restart);
1112         }
1113
1114         /* RX */
1115         for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) {
1116                 j = lio->linfo.rxpciq[vj].s.q_no;
1117
1118                 /* packets sent to TCP/IP network stack */
1119                 /* # of packets to network stack */
1120                 data[i++] = CVM_CAST64(
1121                                 oct_dev->droq[j]->stats.rx_pkts_received);
1122                 /* # of bytes to network stack */
1123                 data[i++] = CVM_CAST64(
1124                                 oct_dev->droq[j]->stats.rx_bytes_received);
1125                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
1126                                        oct_dev->droq[j]->stats.dropped_toomany +
1127                                        oct_dev->droq[j]->stats.rx_dropped);
1128                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
1129                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
1130                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
1131
1132                 /* control and data path */
1133                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
1134                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
1135                 data[i++] =
1136                         CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
1137
1138                 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
1139                 data[i++] =
1140                     CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
1141         }
1142 }
1143
1144 static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
1145 {
1146         struct octeon_device *oct_dev = lio->oct_dev;
1147         int i;
1148
1149         switch (oct_dev->chip_id) {
1150         case OCTEON_CN23XX_PF_VID:
1151         case OCTEON_CN23XX_VF_VID:
1152                 for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
1153                         sprintf(data, "%s", oct_priv_flags_strings[i]);
1154                         data += ETH_GSTRING_LEN;
1155                 }
1156                 break;
1157         case OCTEON_CN68XX:
1158         case OCTEON_CN66XX:
1159                 break;
1160         default:
1161                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1162                 break;
1163         }
1164 }
1165
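/* ethtool get_strings for the PF: the names emitted here must stay in the
 * same order as the values written by lio_get_ethtool_stats().
 */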
1166 static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1167 {
1168         struct lio *lio = GET_LIO(netdev);
1169         struct octeon_device *oct_dev = lio->oct_dev;
1170         int num_iq_stats, num_oq_stats, i, j;
1171         int num_stats;
1172
1173         switch (stringset) {
1174         case ETH_SS_STATS:
1175                 num_stats = ARRAY_SIZE(oct_stats_strings);
1176                 for (j = 0; j < num_stats; j++) {
1177                         sprintf(data, "%s", oct_stats_strings[j]);
1178                         data += ETH_GSTRING_LEN;
1179                 }
1180
1181                 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1182                 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1183                         if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1184                                 continue;
1185                         for (j = 0; j < num_iq_stats; j++) {
1186                                 sprintf(data, "tx-%d-%s", i,
1187                                         oct_iq_stats_strings[j]);
1188                                 data += ETH_GSTRING_LEN;
1189                         }
1190                 }
1191
1192                 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1193                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1194                         if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1195                                 continue;
1196                         for (j = 0; j < num_oq_stats; j++) {
1197                                 sprintf(data, "rx-%d-%s", i,
1198                                         oct_droq_stats_strings[j]);
1199                                 data += ETH_GSTRING_LEN;
1200                         }
1201                 }
1202                 break;
1203
1204         case ETH_SS_PRIV_FLAGS:
1205                 lio_get_priv_flags_strings(lio, data);
1206                 break;
1207         default:
1208                 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1209                 break;
1210         }
1211 }
1212
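/* ethtool get_strings for the VF: names follow the layout used by
 * lio_vf_get_ethtool_stats().
 */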
1213 static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
1214                                u8 *data)
1215 {
1216         int num_iq_stats, num_oq_stats, i, j;
1217         struct lio *lio = GET_LIO(netdev);
1218         struct octeon_device *oct_dev = lio->oct_dev;
1219         int num_stats;
1220
1221         switch (stringset) {
1222         case ETH_SS_STATS:
1223                 num_stats = ARRAY_SIZE(oct_vf_stats_strings);
1224                 for (j = 0; j < num_stats; j++) {
1225                         sprintf(data, "%s", oct_vf_stats_strings[j]);
1226                         data += ETH_GSTRING_LEN;
1227                 }
1228
1229                 num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
1230                 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
1231                         if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
1232                                 continue;
1233                         for (j = 0; j < num_iq_stats; j++) {
1234                                 sprintf(data, "tx-%d-%s", i,
1235                                         oct_iq_stats_strings[j]);
1236                                 data += ETH_GSTRING_LEN;
1237                         }
1238                 }
1239
1240                 num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
1241                 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
1242                         if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
1243                                 continue;
1244                         for (j = 0; j < num_oq_stats; j++) {
1245                                 sprintf(data, "rx-%d-%s", i,
1246                                         oct_droq_stats_strings[j]);
1247                                 data += ETH_GSTRING_LEN;
1248                         }
1249                 }
1250                 break;
1251
1252         case ETH_SS_PRIV_FLAGS:
1253                 lio_get_priv_flags_strings(lio, data);
1254                 break;
1255         default:
1256                 netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
1257                 break;
1258         }
1259 }
1260
1261 static int lio_get_priv_flags_ss_count(struct lio *lio)
1262 {
1263         struct octeon_device *oct_dev = lio->oct_dev;
1264
1265         switch (oct_dev->chip_id) {
1266         case OCTEON_CN23XX_PF_VID:
1267         case OCTEON_CN23XX_VF_VID:
1268                 return ARRAY_SIZE(oct_priv_flags_strings);
1269         case OCTEON_CN68XX:
1270         case OCTEON_CN66XX:
1271                 return -EOPNOTSUPP;
1272         default:
1273                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1274                 return -EOPNOTSUPP;
1275         }
1276 }
1277
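/* ethtool get_sset_count for the PF: per-port stats plus per-queue stats
 * for every active input and output queue.
 */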
1278 static int lio_get_sset_count(struct net_device *netdev, int sset)
1279 {
1280         struct lio *lio = GET_LIO(netdev);
1281         struct octeon_device *oct_dev = lio->oct_dev;
1282
1283         switch (sset) {
1284         case ETH_SS_STATS:
1285                 return (ARRAY_SIZE(oct_stats_strings) +
1286                         ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1287                         ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1288         case ETH_SS_PRIV_FLAGS:
1289                 return lio_get_priv_flags_ss_count(lio);
1290         default:
1291                 return -EOPNOTSUPP;
1292         }
1293 }
1294
1295 static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
1296 {
1297         struct lio *lio = GET_LIO(netdev);
1298         struct octeon_device *oct_dev = lio->oct_dev;
1299
1300         switch (sset) {
1301         case ETH_SS_STATS:
1302                 return (ARRAY_SIZE(oct_vf_stats_strings) +
1303                         ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
1304                         ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
1305         case ETH_SS_PRIV_FLAGS:
1306                 return lio_get_priv_flags_ss_count(lio);
1307         default:
1308                 return -EOPNOTSUPP;
1309         }
1310 }
1311
1312 /* Callback function for intrmod */
1313 static void octnet_intrmod_callback(struct octeon_device *oct_dev,
1314                                     u32 status,
1315                                     void *ptr)
1316 {
1317         struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1318         struct oct_intrmod_context *ctx;
1319
1320         ctx  = (struct oct_intrmod_context *)sc->ctxptr;
1321
1322         ctx->status = status;
1323
1324         WRITE_ONCE(ctx->cond, 1);
1325
1326         /* This barrier is required to be sure that the response has been
1327          * written fully before waking up the handler
1328          */
1329         wmb();
1330
1331         wake_up_interruptible(&ctx->wc);
1332 }
1333
1334 /*  get interrupt moderation parameters */
1335 static int octnet_get_intrmod_cfg(struct lio *lio,
1336                                   struct oct_intrmod_cfg *intr_cfg)
1337 {
1338         struct octeon_soft_command *sc;
1339         struct oct_intrmod_context *ctx;
1340         struct oct_intrmod_resp *resp;
1341         int retval;
1342         struct octeon_device *oct_dev = lio->oct_dev;
1343
1344         /* Alloc soft command */
1345         sc = (struct octeon_soft_command *)
1346                 octeon_alloc_soft_command(oct_dev,
1347                                           0,
1348                                           sizeof(struct oct_intrmod_resp),
1349                                           sizeof(struct oct_intrmod_context));
1350
1351         if (!sc)
1352                 return -ENOMEM;
1353
1354         resp = (struct oct_intrmod_resp *)sc->virtrptr;
1355         memset(resp, 0, sizeof(struct oct_intrmod_resp));
1356
1357         ctx = (struct oct_intrmod_context *)sc->ctxptr;
1358         memset(ctx, 0, sizeof(struct oct_intrmod_context));
1359         WRITE_ONCE(ctx->cond, 0);
1360         ctx->octeon_id = lio_get_device_id(oct_dev);
1361         init_waitqueue_head(&ctx->wc);
1362
1363         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1364
1365         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1366                                     OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
1367
1368         sc->callback = octnet_intrmod_callback;
1369         sc->callback_arg = sc;
1370         sc->wait_time = 1000;
1371
1372         retval = octeon_send_soft_command(oct_dev, sc);
1373         if (retval == IQ_SEND_FAILED) {
1374                 octeon_free_soft_command(oct_dev, sc);
1375                 return -EINVAL;
1376         }
1377
1378         /* Sleep on a wait queue till the cond flag indicates that the
1379          * response arrived or the request timed out.
1380          */
1381         if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
1382                 dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n");
1383                 goto intrmod_info_wait_intr;
1384         }
1385
1386         retval = ctx->status || resp->status;
1387         if (retval) {
1388                 dev_err(&oct_dev->pci_dev->dev,
1389                         "Get interrupt moderation parameters failed\n");
1390                 goto intrmod_info_wait_fail;
1391         }
1392
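        /* The firmware returns the config as raw 64-bit words; swap each
         * word into host byte order (where needed) before copying it out
         * for the caller.
         */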
1393         octeon_swap_8B_data((u64 *)&resp->intrmod,
1394                             (sizeof(struct oct_intrmod_cfg)) / 8);
1395         memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
1396         octeon_free_soft_command(oct_dev, sc);
1397
1398         return 0;
1399
1400 intrmod_info_wait_fail:
1401
1402         octeon_free_soft_command(oct_dev, sc);
1403
1404 intrmod_info_wait_intr:
1405
1406         return -ENODEV;
1407 }
1408
1409 /*  Configure interrupt moderation parameters */
1410 static int octnet_set_intrmod_cfg(struct lio *lio,
1411                                   struct oct_intrmod_cfg *intr_cfg)
1412 {
1413         struct octeon_soft_command *sc;
1414         struct oct_intrmod_context *ctx;
1415         struct oct_intrmod_cfg *cfg;
1416         int retval;
1417         struct octeon_device *oct_dev = lio->oct_dev;
1418
1419         /* Alloc soft command */
1420         sc = (struct octeon_soft_command *)
1421                 octeon_alloc_soft_command(oct_dev,
1422                                           sizeof(struct oct_intrmod_cfg),
1423                                           0,
1424                                           sizeof(struct oct_intrmod_context));
1425
1426         if (!sc)
1427                 return -ENOMEM;
1428
1429         ctx = (struct oct_intrmod_context *)sc->ctxptr;
1430
1431         WRITE_ONCE(ctx->cond, 0);
1432         ctx->octeon_id = lio_get_device_id(oct_dev);
1433         init_waitqueue_head(&ctx->wc);
1434
1435         cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
1436
1437         memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
1438         octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
1439
1440         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1441
1442         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1443                                     OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
1444
1445         sc->callback = octnet_intrmod_callback;
1446         sc->callback_arg = sc;
1447         sc->wait_time = 1000;
1448
1449         retval = octeon_send_soft_command(oct_dev, sc);
1450         if (retval == IQ_SEND_FAILED) {
1451                 octeon_free_soft_command(oct_dev, sc);
1452                 return -EINVAL;
1453         }
1454
1455         /* Sleep on a wait queue till the cond flag indicates that the
1456          * response arrived or the request timed out.
1457          */
1458         if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) {
1459                 retval = ctx->status;
1460                 if (retval)
1461                         dev_err(&oct_dev->pci_dev->dev,
1462                                 "intrmod config failed. Status: %llx\n",
1463                                 CVM_CAST64(retval));
1464                 else
1465                         dev_info(&oct_dev->pci_dev->dev,
1466                                  "Rx-Adaptive Interrupt moderation %s\n",
1467                                  (intr_cfg->rx_enable) ?
1468                                  "enabled" : "disabled");
1469
1470                 octeon_free_soft_command(oct_dev, sc);
1471
1472                 return ((retval) ? -ENODEV : 0);
1473         }
1474
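        /* We only get here if the wait above was interrupted by a signal. */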
1475         dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n");
1476
1477         return -EINTR;
1478 }
1479
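/* Completion callback for OPCODE_NIC_PORT_STATS: on success the firmware's
 * reply is byte-swapped and mirrored into oct_dev->link_stats, and
 * resp->status is set to 1 so that octnet_get_link_stats() can tell a good
 * reply from a timeout; the waiter is then released via ctrl->complete.
 */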
1480 static void
1481 octnet_nic_stats_callback(struct octeon_device *oct_dev,
1482                           u32 status, void *ptr)
1483 {
1484         struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1485         struct oct_nic_stats_resp *resp =
1486             (struct oct_nic_stats_resp *)sc->virtrptr;
1487         struct oct_nic_stats_ctrl *ctrl =
1488             (struct oct_nic_stats_ctrl *)sc->ctxptr;
1489         struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1490         struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1491
1492         struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1493         struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1494
1495         if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
1496                 octeon_swap_8B_data((u64 *)&resp->stats,
1497                                     (sizeof(struct oct_link_stats)) >> 3);
1498
1499                 /* RX link-level stats */
1500                 rstats->total_rcvd = rsp_rstats->total_rcvd;
1501                 rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1502                 rstats->total_bcst = rsp_rstats->total_bcst;
1503                 rstats->total_mcst = rsp_rstats->total_mcst;
1504                 rstats->runts      = rsp_rstats->runts;
1505                 rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1506                 /* Accounts for over/under-run of buffers */
1507                 rstats->fifo_err  = rsp_rstats->fifo_err;
1508                 rstats->dmac_drop = rsp_rstats->dmac_drop;
1509                 rstats->fcs_err   = rsp_rstats->fcs_err;
1510                 rstats->jabber_err = rsp_rstats->jabber_err;
1511                 rstats->l2_err    = rsp_rstats->l2_err;
1512                 rstats->frame_err = rsp_rstats->frame_err;
1513
1514                 /* RX firmware stats */
1515                 rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1516                 rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1517                 rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1518                 rstats->fw_err_link = rsp_rstats->fw_err_link;
1519                 rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1520                 rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1521                 rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1522
1523                 /* Number of packets that are LROed      */
1524                 rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1525                 /* Number of octets that are LROed       */
1526                 rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1527                 /* Number of LRO packets formed          */
1528                 rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1529                 /* Number of times LRO of packet aborted */
1530                 rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1531                 rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1532                 rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1533                 rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1534                 rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1535                 /* intrmod: packet forward rate */
1536                 rstats->fwd_rate = rsp_rstats->fwd_rate;
1537
1538                 /* TX link-level stats */
1539                 tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1540                 tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1541                 tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1542                 tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1543                 tstats->ctl_sent = rsp_tstats->ctl_sent;
1544                 /* Packets sent after one collision*/
1545                 tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1546                 /* Packets sent after multiple collision*/
1547                 tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1548                 /* Packets not sent due to max collisions */
1549                 tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1550                 /* Packets not sent due to max deferrals */
1551                 tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1552                 /* Accounts for over/under-run of buffers */
1553                 tstats->fifo_err = rsp_tstats->fifo_err;
1554                 tstats->runts = rsp_tstats->runts;
1555                 /* Total number of collisions detected */
1556                 tstats->total_collisions = rsp_tstats->total_collisions;
1557
1558                 /* firmware stats */
1559                 tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1560                 tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1561                 tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1562                 tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1563                 tstats->fw_err_link = rsp_tstats->fw_err_link;
1564                 tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1565                 tstats->fw_tso = rsp_tstats->fw_tso;
1566                 tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1567                 tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1568                 tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1569
1570                 resp->status = 1;
1571         } else {
1572                 resp->status = -1;
1573         }
1574         complete(&ctrl->complete);
1575 }
1576
1577 /*  Fetch the port's link-level and firmware statistics from the NIC */
1578 static int octnet_get_link_stats(struct net_device *netdev)
1579 {
1580         struct lio *lio = GET_LIO(netdev);
1581         struct octeon_device *oct_dev = lio->oct_dev;
1582
1583         struct octeon_soft_command *sc;
1584         struct oct_nic_stats_ctrl *ctrl;
1585         struct oct_nic_stats_resp *resp;
1586
1587         int retval;
1588
1589         /* Alloc soft command */
1590         sc = (struct octeon_soft_command *)
1591                 octeon_alloc_soft_command(oct_dev,
1592                                           0,
1593                                           sizeof(struct oct_nic_stats_resp),
1594                                           sizeof(struct octnic_ctrl_pkt));
1595
1596         if (!sc)
1597                 return -ENOMEM;
1598
1599         resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1600         memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1601
1602         ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1603         memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1604         ctrl->netdev = netdev;
1605         init_completion(&ctrl->complete);
1606
1607         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1608
1609         octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1610                                     OPCODE_NIC_PORT_STATS, 0, 0, 0);
1611
1612         sc->callback = octnet_nic_stats_callback;
1613         sc->callback_arg = sc;
1614         sc->wait_time = 500;    /* in milliseconds */
1615
1616         retval = octeon_send_soft_command(oct_dev, sc);
1617         if (retval == IQ_SEND_FAILED) {
1618                 octeon_free_soft_command(oct_dev, sc);
1619                 return -EINVAL;
1620         }
1621
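        /* Wait (up to a second) for octnet_nic_stats_callback() to run;
         * resp->status is 1 only if the firmware replied successfully.
         */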
1622         wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1623
1624         if (resp->status != 1) {
1625                 octeon_free_soft_command(oct_dev, sc);
1626
1627                 return -EINVAL;
1628         }
1629
1630         octeon_free_soft_command(oct_dev, sc);
1631
1632         return 0;
1633 }
1634
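/* ethtool -c: report the current interrupt coalescing settings.  Fixed
 * (non-adaptive) values come from the chip/driver configuration; adaptive
 * thresholds are read back from the firmware via octnet_get_intrmod_cfg().
 */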
1635 static int lio_get_intr_coalesce(struct net_device *netdev,
1636                                  struct ethtool_coalesce *intr_coal)
1637 {
1638         struct lio *lio = GET_LIO(netdev);
1639         struct octeon_device *oct = lio->oct_dev;
1640         struct octeon_instr_queue *iq;
1641         struct oct_intrmod_cfg intrmod_cfg;
1642
1643         if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
1644                 return -ENODEV;
1645
1646         switch (oct->chip_id) {
1647         case OCTEON_CN23XX_PF_VID:
1648         case OCTEON_CN23XX_VF_VID: {
1649                 if (!intrmod_cfg.rx_enable) {
1650                         intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
1651                         intr_coal->rx_max_coalesced_frames =
1652                                 oct->rx_max_coalesced_frames;
1653                 }
1654                 if (!intrmod_cfg.tx_enable)
1655                         intr_coal->tx_max_coalesced_frames =
1656                                 oct->tx_max_coalesced_frames;
1657                 break;
1658         }
1659         case OCTEON_CN68XX:
1660         case OCTEON_CN66XX: {
1661                 struct octeon_cn6xxx *cn6xxx =
1662                         (struct octeon_cn6xxx *)oct->chip;
1663
1664                 if (!intrmod_cfg.rx_enable) {
1665                         intr_coal->rx_coalesce_usecs =
1666                                 CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
1667                         intr_coal->rx_max_coalesced_frames =
1668                                 CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
1669                 }
1670                 iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
1671                 intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
1672                 break;
1673         }
1674         default:
1675                 netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
1676                 return -EINVAL;
1677         }
1678         if (intrmod_cfg.rx_enable) {
1679                 intr_coal->use_adaptive_rx_coalesce =
1680                         intrmod_cfg.rx_enable;
1681                 intr_coal->rate_sample_interval =
1682                         intrmod_cfg.check_intrvl;
1683                 intr_coal->pkt_rate_high =
1684                         intrmod_cfg.maxpkt_ratethr;
1685                 intr_coal->pkt_rate_low =
1686                         intrmod_cfg.minpkt_ratethr;
1687                 intr_coal->rx_max_coalesced_frames_high =
1688                         intrmod_cfg.rx_maxcnt_trigger;
1689                 intr_coal->rx_coalesce_usecs_high =
1690                         intrmod_cfg.rx_maxtmr_trigger;
1691                 intr_coal->rx_coalesce_usecs_low =
1692                         intrmod_cfg.rx_mintmr_trigger;
1693                 intr_coal->rx_max_coalesced_frames_low =
1694                         intrmod_cfg.rx_mincnt_trigger;
1695         }
1696         if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
1697             (intrmod_cfg.tx_enable)) {
1698                 intr_coal->use_adaptive_tx_coalesce =
1699                         intrmod_cfg.tx_enable;
1700                 intr_coal->tx_max_coalesced_frames_high =
1701                         intrmod_cfg.tx_maxcnt_trigger;
1702                 intr_coal->tx_max_coalesced_frames_low =
1703                         intrmod_cfg.tx_mincnt_trigger;
1704         }
1705         return 0;
1706 }
1707
1708 /* Enable/Disable adaptive interrupt moderation */
1709 static int oct_cfg_adaptive_intr(struct lio *lio,
1710                                  struct oct_intrmod_cfg *intrmod_cfg,
1711                                  struct ethtool_coalesce *intr_coal)
1712 {
1713         int ret = 0;
1714
1715         if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
1716                 intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
1717                 intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
1718                 intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
1719         }
1720         if (intrmod_cfg->rx_enable) {
1721                 intrmod_cfg->rx_maxcnt_trigger =
1722                         intr_coal->rx_max_coalesced_frames_high;
1723                 intrmod_cfg->rx_maxtmr_trigger =
1724                         intr_coal->rx_coalesce_usecs_high;
1725                 intrmod_cfg->rx_mintmr_trigger =
1726                         intr_coal->rx_coalesce_usecs_low;
1727                 intrmod_cfg->rx_mincnt_trigger =
1728                         intr_coal->rx_max_coalesced_frames_low;
1729         }
1730         if (intrmod_cfg->tx_enable) {
1731                 intrmod_cfg->tx_maxcnt_trigger =
1732                         intr_coal->tx_max_coalesced_frames_high;
1733                 intrmod_cfg->tx_mincnt_trigger =
1734                         intr_coal->tx_max_coalesced_frames_low;
1735         }
1736
1737         ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
1738
1739         return ret;
1740 }
1741
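/* Program the packet-count based RX interrupt threshold (ethtool rx-frames)
 * into the output-queue interrupt-level register(s).
 */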
1742 static int
1743 oct_cfg_rx_intrcnt(struct lio *lio,
1744                    struct oct_intrmod_cfg *intrmod,
1745                    struct ethtool_coalesce *intr_coal)
1746 {
1747         struct octeon_device *oct = lio->oct_dev;
1748         u32 rx_max_coalesced_frames;
1749
1750         /* Config Cnt based interrupt values */
1751         switch (oct->chip_id) {
1752         case OCTEON_CN68XX:
1753         case OCTEON_CN66XX: {
1754                 struct octeon_cn6xxx *cn6xxx =
1755                         (struct octeon_cn6xxx *)oct->chip;
1756
1757                 if (!intr_coal->rx_max_coalesced_frames)
1758                         rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
1759                 else
1760                         rx_max_coalesced_frames =
1761                                 intr_coal->rx_max_coalesced_frames;
1762                 octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
1763                                  rx_max_coalesced_frames);
1764                 CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
1765                 break;
1766         }
1767         case OCTEON_CN23XX_PF_VID: {
1768                 int q_no;
1769
1770                 if (!intr_coal->rx_max_coalesced_frames)
1771                         rx_max_coalesced_frames = intrmod->rx_frames;
1772                 else
1773                         rx_max_coalesced_frames =
1774                             intr_coal->rx_max_coalesced_frames;
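                /*
                 * SLI_OQ_PKT_INT_LEVELS packs the packet-count threshold in
                 * its low bits and the time threshold from bit 32 up; the
                 * mask below preserves the time field and rewrites only the
                 * count.
                 */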
1775                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1776                         q_no += oct->sriov_info.pf_srn;
1777                         octeon_write_csr64(
1778                             oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1779                             (octeon_read_csr64(
1780                                  oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1781                              (0x3fffff00000000UL)) |
1782                                 (rx_max_coalesced_frames - 1));
1783                         /*consider setting resend bit*/
1784                 }
1785                 intrmod->rx_frames = rx_max_coalesced_frames;
1786                 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
1787                 break;
1788         }
1789         case OCTEON_CN23XX_VF_VID: {
1790                 int q_no;
1791
1792                 if (!intr_coal->rx_max_coalesced_frames)
1793                         rx_max_coalesced_frames = intrmod->rx_frames;
1794                 else
1795                         rx_max_coalesced_frames =
1796                             intr_coal->rx_max_coalesced_frames;
1797                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1798                         octeon_write_csr64(
1799                             oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
1800                             (octeon_read_csr64(
1801                                  oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
1802                              (0x3fffff00000000UL)) |
1803                                 (rx_max_coalesced_frames - 1));
1804                         /*consider writing to resend bit here*/
1805                 }
1806                 intrmod->rx_frames = rx_max_coalesced_frames;
1807                 oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
1808                 break;
1809         }
1810         default:
1811                 return -EINVAL;
1812         }
1813         return 0;
1814 }
1815
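/* Program the time-based RX interrupt threshold (ethtool rx-usecs).  The
 * microsecond value is converted into output-queue ticks for the chip; on
 * CN23XX it is written into the same INT_LEVELS register that carries the
 * packet-count threshold kept in intrmod->rx_frames.
 */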
1816 static int oct_cfg_rx_intrtime(struct lio *lio,
1817                                struct oct_intrmod_cfg *intrmod,
1818                                struct ethtool_coalesce *intr_coal)
1819 {
1820         struct octeon_device *oct = lio->oct_dev;
1821         u32 time_threshold, rx_coalesce_usecs;
1822
1823         /* Config Time based interrupt values */
1824         switch (oct->chip_id) {
1825         case OCTEON_CN68XX:
1826         case OCTEON_CN66XX: {
1827                 struct octeon_cn6xxx *cn6xxx =
1828                         (struct octeon_cn6xxx *)oct->chip;
1829                 if (!intr_coal->rx_coalesce_usecs)
1830                         rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
1831                 else
1832                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1833
1834                 time_threshold = lio_cn6xxx_get_oq_ticks(oct,
1835                                                          rx_coalesce_usecs);
1836                 octeon_write_csr(oct,
1837                                  CN6XXX_SLI_OQ_INT_LEVEL_TIME,
1838                                  time_threshold);
1839
1840                 CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
1841                 break;
1842         }
1843         case OCTEON_CN23XX_PF_VID: {
1844                 u64 time_threshold;
1845                 int q_no;
1846
1847                 if (!intr_coal->rx_coalesce_usecs)
1848                         rx_coalesce_usecs = intrmod->rx_usecs;
1849                 else
1850                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1851                 time_threshold =
1852                     cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1853                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1854                         q_no += oct->sriov_info.pf_srn;
1855                         octeon_write_csr64(oct,
1856                                            CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
1857                                            (intrmod->rx_frames |
1858                                             ((u64)time_threshold << 32)));
1859                         /*consider writing to resend bit here*/
1860                 }
1861                 intrmod->rx_usecs = rx_coalesce_usecs;
1862                 oct->rx_coalesce_usecs = rx_coalesce_usecs;
1863                 break;
1864         }
1865         case OCTEON_CN23XX_VF_VID: {
1866                 u64 time_threshold;
1867                 int q_no;
1868
1869                 if (!intr_coal->rx_coalesce_usecs)
1870                         rx_coalesce_usecs = intrmod->rx_usecs;
1871                 else
1872                         rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
1873
1874                 time_threshold =
1875                     cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
1876                 for (q_no = 0; q_no < oct->num_oqs; q_no++) {
1877                         octeon_write_csr64(
1878                                 oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
1879                                 (intrmod->rx_frames |
1880                                  ((u64)time_threshold << 32)));
1881                         /*consider setting resend bit*/
1882                 }
1883                 intrmod->rx_usecs = rx_coalesce_usecs;
1884                 oct->rx_coalesce_usecs = rx_coalesce_usecs;
1885                 break;
1886         }
1887         default:
1888                 return -EINVAL;
1889         }
1890
1891         return 0;
1892 }
1893
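/* Program the TX (instruction-queue) completion threshold (ethtool
 * tx-frames).  On CN23XX the watermark shares each IQ's instruction-count
 * register, so the count bits are masked off and only the watermark field
 * is rewritten; CN66XX/CN68XX use the ring fill threshold instead (handled
 * in lio_set_intr_coalesce()).
 */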
1894 static int
1895 oct_cfg_tx_intrcnt(struct lio *lio,
1896                    struct oct_intrmod_cfg *intrmod,
1897                    struct ethtool_coalesce *intr_coal)
1898 {
1899         struct octeon_device *oct = lio->oct_dev;
1900         u32 iq_intr_pkt;
1901         void __iomem *inst_cnt_reg;
1902         u64 val;
1903
1904         /* Config Cnt based interrupt values */
1905         switch (oct->chip_id) {
1906         case OCTEON_CN68XX:
1907         case OCTEON_CN66XX:
1908                 break;
1909         case OCTEON_CN23XX_VF_VID:
1910         case OCTEON_CN23XX_PF_VID: {
1911                 int q_no;
1912
1913                 if (!intr_coal->tx_max_coalesced_frames)
1914                         iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
1915                                       CN23XX_PKT_IN_DONE_WMARK_MASK;
1916                 else
1917                         iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
1918                                       CN23XX_PKT_IN_DONE_WMARK_MASK;
1919                 for (q_no = 0; q_no < oct->num_iqs; q_no++) {
1920                         inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
1921                         val = readq(inst_cnt_reg);
1922                         /* clear wmark and count; don't want to write the count back */
1923                         val = (val & 0xFFFF000000000000ULL) |
1924                               ((u64)(iq_intr_pkt - 1)
1925                                << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
1926                         writeq(val, inst_cnt_reg);
1927                         /*consider setting resend bit*/
1928                 }
1929                 intrmod->tx_frames = iq_intr_pkt;
1930                 oct->tx_max_coalesced_frames = iq_intr_pkt;
1931                 break;
1932         }
1933         default:
1934                 return -EINVAL;
1935         }
1936         return 0;
1937 }
1938
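/* ethtool -C: apply new interrupt coalescing settings.  Adaptive moderation
 * is pushed to the firmware via oct_cfg_adaptive_intr(); fixed rx/tx
 * thresholds are programmed directly into the per-queue registers.
 */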
1939 static int lio_set_intr_coalesce(struct net_device *netdev,
1940                                  struct ethtool_coalesce *intr_coal)
1941 {
1942         struct lio *lio = GET_LIO(netdev);
1943         int ret;
1944         struct octeon_device *oct = lio->oct_dev;
1945         struct oct_intrmod_cfg intrmod = {0};
1946         u32 j, q_no;
1947         int db_max, db_min;
1948
1949         switch (oct->chip_id) {
1950         case OCTEON_CN68XX:
1951         case OCTEON_CN66XX:
1952                 db_min = CN6XXX_DB_MIN;
1953                 db_max = CN6XXX_DB_MAX;
1954                 if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
1955                     (intr_coal->tx_max_coalesced_frames <= db_max)) {
1956                         for (j = 0; j < lio->linfo.num_txpciq; j++) {
1957                                 q_no = lio->linfo.txpciq[j].s.q_no;
1958                                 oct->instr_queue[q_no]->fill_threshold =
1959                                         intr_coal->tx_max_coalesced_frames;
1960                         }
1961                 } else {
1962                         dev_err(&oct->pci_dev->dev,
1963                                 "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
1964                                 intr_coal->tx_max_coalesced_frames,
1965                                 db_min, db_max);
1966                         return -EINVAL;
1967                 }
1968                 break;
1969         case OCTEON_CN23XX_PF_VID:
1970         case OCTEON_CN23XX_VF_VID:
1971                 break;
1972         default:
1973                 return -EINVAL;
1974         }
1975
1976         intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
1977         intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
1978         intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
1979         intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
1980         intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
1981
1982         ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
1983
1984         if (!intr_coal->use_adaptive_rx_coalesce) {
1985                 ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
1986                 if (ret)
1987                         goto ret_intrmod;
1988
1989                 ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
1990                 if (ret)
1991                         goto ret_intrmod;
1992         } else {
1993                 oct->rx_coalesce_usecs =
1994                         CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
1995                 oct->rx_max_coalesced_frames =
1996                         CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
1997         }
1998
1999         if (!intr_coal->use_adaptive_tx_coalesce) {
2000                 ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
2001                 if (ret)
2002                         goto ret_intrmod;
2003         } else {
2004                 oct->tx_max_coalesced_frames =
2005                         CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
2006         }
2007
2008         return 0;
2009 ret_intrmod:
2010         return ret;
2011 }
2012
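/* ethtool -T: report timestamping capabilities.  Hardware timestamping is
 * advertised only when PTP_HARDWARE_TIMESTAMPING is compiled in; the PHC
 * index is valid only if a PTP clock was registered for this interface.
 */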
2013 static int lio_get_ts_info(struct net_device *netdev,
2014                            struct ethtool_ts_info *info)
2015 {
2016         struct lio *lio = GET_LIO(netdev);
2017
2018         info->so_timestamping =
2019 #ifdef PTP_HARDWARE_TIMESTAMPING
2020                 SOF_TIMESTAMPING_TX_HARDWARE |
2021                 SOF_TIMESTAMPING_RX_HARDWARE |
2022                 SOF_TIMESTAMPING_RAW_HARDWARE |
2023                 SOF_TIMESTAMPING_TX_SOFTWARE |
2024 #endif
2025                 SOF_TIMESTAMPING_RX_SOFTWARE |
2026                 SOF_TIMESTAMPING_SOFTWARE;
2027
2028         if (lio->ptp_clock)
2029                 info->phc_index = ptp_clock_index(lio->ptp_clock);
2030         else
2031                 info->phc_index = -1;
2032
2033 #ifdef PTP_HARDWARE_TIMESTAMPING
2034         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
2035
2036         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2037                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2038                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2039                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
2040 #endif
2041
2042         return 0;
2043 }
2044
2045 /* Return register dump len. */
2046 static int lio_get_regs_len(struct net_device *dev)
2047 {
2048         struct lio *lio = GET_LIO(dev);
2049         struct octeon_device *oct = lio->oct_dev;
2050
2051         switch (oct->chip_id) {
2052         case OCTEON_CN23XX_PF_VID:
2053                 return OCT_ETHTOOL_REGDUMP_LEN_23XX;
2054         case OCTEON_CN23XX_VF_VID:
2055                 return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
2056         default:
2057                 return OCT_ETHTOOL_REGDUMP_LEN;
2058         }
2059 }
2060
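/* Pretty-print the CN23XX PF CSRs of interest for 'ethtool -d'.  Each entry
 * is written as "[offset] (NAME): value" into the buffer supplied by
 * lio_get_regs().
 */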
2061 static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
2062 {
2063         u32 reg;
2064         u8 pf_num = oct->pf_num;
2065         int len = 0;
2066         int i;
2067
2068         /* PCI  Window Registers */
2069
2070         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2071
2072         /*0x29030 or 0x29040*/
2073         reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
2074         len += sprintf(s + len,
2075                        "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
2076                        reg, oct->pcie_port, oct->pf_num,
2077                        (u64)octeon_read_csr64(oct, reg));
2078
2079         /*0x27080 or 0x27090*/
2080         reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
2081         len +=
2082             sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
2083                     reg, oct->pcie_port, oct->pf_num,
2084                     (u64)octeon_read_csr64(oct, reg));
2085
2086         /*0x27000 or 0x27010*/
2087         reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
2088         len +=
2089             sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
2090                     reg, oct->pcie_port, oct->pf_num,
2091                     (u64)octeon_read_csr64(oct, reg));
2092
2093         /*0x29120*/
2094         reg = 0x29120;
2095         len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
2096                        (u64)octeon_read_csr64(oct, reg));
2097
2098         /*0x27300*/
2099         reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2100               (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2101         len += sprintf(
2102             s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
2103             oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
2104
2105         /*0x27200*/
2106         reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
2107               (oct->pf_num) * CN23XX_PF_INT_OFFSET;
2108         len += sprintf(s + len,
2109                        "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
2110                        reg, oct->pcie_port, oct->pf_num,
2111                        (u64)octeon_read_csr64(oct, reg));
2112
2113         /*0x29130*/
2114         reg = CN23XX_SLI_PKT_CNT_INT;
2115         len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
2116                        (u64)octeon_read_csr64(oct, reg));
2117
2118         /*0x29140*/
2119         reg = CN23XX_SLI_PKT_TIME_INT;
2120         len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
2121                        (u64)octeon_read_csr64(oct, reg));
2122
2123         /*0x29160*/
2124         reg = 0x29160;
2125         len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
2126                        (u64)octeon_read_csr64(oct, reg));
2127
2128         /*0x29180*/
2129         reg = CN23XX_SLI_OQ_WMARK;
2130         len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
2131                        reg, (u64)octeon_read_csr64(oct, reg));
2132
2133         /*0x291E0*/
2134         reg = CN23XX_SLI_PKT_IOQ_RING_RST;
2135         len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
2136                        (u64)octeon_read_csr64(oct, reg));
2137
2138         /*0x29210*/
2139         reg = CN23XX_SLI_GBL_CONTROL;
2140         len += sprintf(s + len,
2141                        "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
2142                        (u64)octeon_read_csr64(oct, reg));
2143
2144         /*0x29220*/
2145         reg = 0x29220;
2146         len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
2147                        reg, (u64)octeon_read_csr64(oct, reg));
2148
2149         /*PF only*/
2150         if (pf_num == 0) {
2151                 /*0x29260*/
2152                 reg = CN23XX_SLI_OUT_BP_EN_W1S;
2153                 len += sprintf(s + len,
2154                                "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S):  %016llx\n",
2155                                reg, (u64)octeon_read_csr64(oct, reg));
2156         } else if (pf_num == 1) {
2157                 /*0x29270*/
2158                 reg = CN23XX_SLI_OUT_BP_EN2_W1S;
2159                 len += sprintf(s + len,
2160                                "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
2161                                reg, (u64)octeon_read_csr64(oct, reg));
2162         }
2163
2164         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2165                 reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
2166                 len +=
2167                     sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2168                             reg, i, (u64)octeon_read_csr64(oct, reg));
2169         }
2170
2171         /*0x10040*/
2172         for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2173                 reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2174                 len += sprintf(s + len,
2175                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2176                                reg, i, (u64)octeon_read_csr64(oct, reg));
2177         }
2178
2179         /*0x10080*/
2180         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2181                 reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
2182                 len += sprintf(s + len,
2183                                "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2184                                reg, i, (u64)octeon_read_csr64(oct, reg));
2185         }
2186
2187         /*0x10090*/
2188         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2189                 reg = CN23XX_SLI_OQ_SIZE(i);
2190                 len += sprintf(
2191                     s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2192                     reg, i, (u64)octeon_read_csr64(oct, reg));
2193         }
2194
2195         /*0x10050*/
2196         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2197                 reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
2198                 len += sprintf(
2199                         s + len,
2200                         "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2201                         reg, i, (u64)octeon_read_csr64(oct, reg));
2202         }
2203
2204         /*0x10070*/
2205         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2206                 reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
2207                 len += sprintf(s + len,
2208                                "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2209                                reg, i, (u64)octeon_read_csr64(oct, reg));
2210         }
2211
2212         /*0x100a0*/
2213         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2214                 reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
2215                 len += sprintf(s + len,
2216                                "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2217                                reg, i, (u64)octeon_read_csr64(oct, reg));
2218         }
2219
2220         /*0x100b0*/
2221         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2222                 reg = CN23XX_SLI_OQ_PKTS_SENT(i);
2223                 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2224                                reg, i, (u64)octeon_read_csr64(oct, reg));
2225         }
2226
2227         /*0x100c0*/
2228         for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
2229                 reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
2230                 len += sprintf(s + len,
2231                                "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2232                                reg, i, (u64)octeon_read_csr64(oct, reg));
2233
2234                 /*0x10000*/
2235                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2236                         reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
2237                         len += sprintf(
2238                                 s + len,
2239                                 "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2240                                 reg, i, (u64)octeon_read_csr64(oct, reg));
2241                 }
2242
2243                 /*0x10010*/
2244                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2245                         reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
2246                         len += sprintf(
2247                             s + len,
2248                             "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
2249                             i, (u64)octeon_read_csr64(oct, reg));
2250                 }
2251
2252                 /*0x10020*/
2253                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2254                         reg = CN23XX_SLI_IQ_DOORBELL(i);
2255                         len += sprintf(
2256                             s + len,
2257                             "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2258                             reg, i, (u64)octeon_read_csr64(oct, reg));
2259                 }
2260
2261                 /*0x10030*/
2262                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
2263                         reg = CN23XX_SLI_IQ_SIZE(i);
2264                         len += sprintf(
2265                             s + len,
2266                             "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2267                             reg, i, (u64)octeon_read_csr64(oct, reg));
2268                 }
2269
2270                 /*0x10040*/
2271                 for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
2272                         reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
2273                 len += sprintf(s + len,
2274                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2275                                reg, i, (u64)octeon_read_csr64(oct, reg));
2276         }
2277
2278         return len;
2279 }
2280
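/* As above, but restricted to the per-ring CSRs a CN23XX VF may access;
 * only the rings assigned to this VF are dumped.
 */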
2281 static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
2282 {
2283         int len = 0;
2284         u32 reg;
2285         int i;
2286
2287         /* PCI  Window Registers */
2288
2289         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2290
2291         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2292                 reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
2293                 len += sprintf(s + len,
2294                                "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
2295                                reg, i, (u64)octeon_read_csr64(oct, reg));
2296         }
2297
2298         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2299                 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2300                 len += sprintf(s + len,
2301                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2302                                reg, i, (u64)octeon_read_csr64(oct, reg));
2303         }
2304
2305         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2306                 reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
2307                 len += sprintf(s + len,
2308                                "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
2309                                reg, i, (u64)octeon_read_csr64(oct, reg));
2310         }
2311
2312         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2313                 reg = CN23XX_VF_SLI_OQ_SIZE(i);
2314                 len += sprintf(s + len,
2315                                "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
2316                                reg, i, (u64)octeon_read_csr64(oct, reg));
2317         }
2318
2319         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2320                 reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
2321                 len += sprintf(s + len,
2322                                "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
2323                                reg, i, (u64)octeon_read_csr64(oct, reg));
2324         }
2325
2326         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2327                 reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
2328                 len += sprintf(s + len,
2329                                "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
2330                                reg, i, (u64)octeon_read_csr64(oct, reg));
2331         }
2332
2333         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2334                 reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
2335                 len += sprintf(s + len,
2336                                "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
2337                                reg, i, (u64)octeon_read_csr64(oct, reg));
2338         }
2339
2340         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2341                 reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
2342                 len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
2343                                reg, i, (u64)octeon_read_csr64(oct, reg));
2344         }
2345
2346         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2347                 reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
2348                 len += sprintf(s + len,
2349                                "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
2350                                reg, i, (u64)octeon_read_csr64(oct, reg));
2351         }
2352
2353         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2354                 reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
2355                 len += sprintf(s + len,
2356                                "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
2357                                reg, i, (u64)octeon_read_csr64(oct, reg));
2358         }
2359
2360         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2361                 reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
2362                 len += sprintf(s + len,
2363                                "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
2364                                reg, i, (u64)octeon_read_csr64(oct, reg));
2365         }
2366
2367         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2368                 reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
2369                 len += sprintf(s + len,
2370                                "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
2371                                reg, i, (u64)octeon_read_csr64(oct, reg));
2372         }
2373
2374         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2375                 reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
2376                 len += sprintf(s + len,
2377                                "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
2378                                reg, i, (u64)octeon_read_csr64(oct, reg));
2379         }
2380
2381         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2382                 reg = CN23XX_VF_SLI_IQ_SIZE(i);
2383                 len += sprintf(s + len,
2384                                "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
2385                                reg, i, (u64)octeon_read_csr64(oct, reg));
2386         }
2387
2388         for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
2389                 reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
2390                 len += sprintf(s + len,
2391                                "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
2392                                reg, i, (u64)octeon_read_csr64(oct, reg));
2393         }
2394
2395         return len;
2396 }
2397
2398 static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
2399 {
2400         u32 reg;
2401         int i, len = 0;
2402
2403         /* PCI  Window Registers */
2404
2405         len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
2406         reg = CN6XXX_WIN_WR_ADDR_LO;
2407         len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
2408                        CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
2409         reg = CN6XXX_WIN_WR_ADDR_HI;
2410         len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
2411                        CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
2412         reg = CN6XXX_WIN_RD_ADDR_LO;
2413         len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
2414                        CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
2415         reg = CN6XXX_WIN_RD_ADDR_HI;
2416         len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
2417                        CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
2418         reg = CN6XXX_WIN_WR_DATA_LO;
2419         len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
2420                        CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
2421         reg = CN6XXX_WIN_WR_DATA_HI;
2422         len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
2423                        CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
2424         len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
2425                        CN6XXX_WIN_WR_MASK_REG,
2426                        octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
2427
2428         /* PCI  Interrupt Register */
2429         len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
2430                        CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
2431                                                 CN6XXX_SLI_INT_ENB64_PORT0));
2432         len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
2433                        CN6XXX_SLI_INT_ENB64_PORT1,
2434                        octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
2435         len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
2436                        octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
2437
2438         /* PCI  Output queue registers */
2439         for (i = 0; i < oct->num_oqs; i++) {
2440                 reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
2441                 len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
2442                                reg, i, octeon_read_csr(oct, reg));
2443                 reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
2444                 len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
2445                                reg, i, octeon_read_csr(oct, reg));
2446         }
2447         reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
2448         len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
2449                        reg, octeon_read_csr(oct, reg));
2450         reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
2451         len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
2452                        reg, octeon_read_csr(oct, reg));
2453
2454         /* PCI  Input queue registers */
2455         for (i = 0; i <= 3; i++) {
2456                 u32 reg;
2457
2458                 reg = CN6XXX_SLI_IQ_DOORBELL(i);
2459                 len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
2460                                reg, i, octeon_read_csr(oct, reg));
2461                 reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
2462                 len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
2463                                reg, i, octeon_read_csr(oct, reg));
2464         }
2465
2466         /* PCI  DMA registers */
2467
2468         len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
2469                        CN6XXX_DMA_CNT(0),
2470                        octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
2471         reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
2472         len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
2473                        CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
2474         reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
2475         len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
2476                        CN6XXX_DMA_TIME_INT_LEVEL(0),
2477                        octeon_read_csr(oct, reg));
2478
2479         len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
2480                        CN6XXX_DMA_CNT(1),
2481                        octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
2482         reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2483         len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
2484                        CN6XXX_DMA_PKT_INT_LEVEL(1),
2485                        octeon_read_csr(oct, reg));
2486         reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
2487         len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
2488                        CN6XXX_DMA_TIME_INT_LEVEL(1),
2489                        octeon_read_csr(oct, reg));
2490
2491         /* PCI  Index registers */
2492
2493         len += sprintf(s + len, "\n");
2494
2495         for (i = 0; i < 16; i++) {
2496                 reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
2497                 len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
2498                                CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
2499         }
2500
2501         return len;
2502 }
2503
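/* Dump selected PCI config-space dwords (the standard header plus a handful
 * of higher config dwords) for CN66XX/CN68XX.
 */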
2504 static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
2505 {
2506         u32 val;
2507         int i, len = 0;
2508
2509         /* PCI CONFIG Registers */
2510
2511         len += sprintf(s + len,
2512                        "\n\t Octeon Config space Registers\n\n");
2513
2514         for (i = 0; i <= 13; i++) {
2515                 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2516                 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2517                                (i * 4), i, val);
2518         }
2519
2520         for (i = 30; i <= 34; i++) {
2521                 pci_read_config_dword(oct->pci_dev, (i * 4), &val);
2522                 len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
2523                                (i * 4), i, val);
2524         }
2525
2526         return len;
2527 }
2528
2529 /*  Return register dump to the user app.  */
2530 static void lio_get_regs(struct net_device *dev,
2531                          struct ethtool_regs *regs, void *regbuf)
2532 {
2533         struct lio *lio = GET_LIO(dev);
2534         int len = 0;
2535         struct octeon_device *oct = lio->oct_dev;
2536
2537         regs->version = OCT_ETHTOOL_REGSVER;
2538
2539         switch (oct->chip_id) {
2540         case OCTEON_CN23XX_PF_VID:
2541                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
2542                 len += cn23xx_read_csr_reg(regbuf + len, oct);
2543                 break;
2544         case OCTEON_CN23XX_VF_VID:
2545                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
2546                 len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
2547                 break;
2548         case OCTEON_CN68XX:
2549         case OCTEON_CN66XX:
2550                 memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
2551                 len += cn6xxx_read_csr_reg(regbuf + len, oct);
2552                 len += cn6xxx_read_config_reg(regbuf + len, oct);
2553                 break;
2554         default:
2555                 dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
2556                         __func__, oct->chip_id);
2557         }
2558 }
2559
2560 static u32 lio_get_priv_flags(struct net_device *netdev)
2561 {
2562         struct lio *lio = GET_LIO(netdev);
2563
2564         return lio->oct_dev->priv_flags;
2565 }
2566
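/* ethtool --set-priv-flags: the only flag handled here is
 * OCT_PRIV_FLAG_TX_BYTES.
 */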
2567 static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
2568 {
2569         struct lio *lio = GET_LIO(netdev);
2570         bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
2571
2572         lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
2573                           intr_by_tx_bytes);
2574         return 0;
2575 }
2576
2577 static const struct ethtool_ops lio_ethtool_ops = {
2578         .get_link_ksettings     = lio_get_link_ksettings,
2579         .get_link               = ethtool_op_get_link,
2580         .get_drvinfo            = lio_get_drvinfo,
2581         .get_ringparam          = lio_ethtool_get_ringparam,
2582         .get_channels           = lio_ethtool_get_channels,
2583         .set_phys_id            = lio_set_phys_id,
2584         .get_eeprom_len         = lio_get_eeprom_len,
2585         .get_eeprom             = lio_get_eeprom,
2586         .get_strings            = lio_get_strings,
2587         .get_ethtool_stats      = lio_get_ethtool_stats,
2588         .get_pauseparam         = lio_get_pauseparam,
2589         .set_pauseparam         = lio_set_pauseparam,
2590         .get_regs_len           = lio_get_regs_len,
2591         .get_regs               = lio_get_regs,
2592         .get_msglevel           = lio_get_msglevel,
2593         .set_msglevel           = lio_set_msglevel,
2594         .get_sset_count         = lio_get_sset_count,
2595         .get_coalesce           = lio_get_intr_coalesce,
2596         .set_coalesce           = lio_set_intr_coalesce,
2597         .get_priv_flags         = lio_get_priv_flags,
2598         .set_priv_flags         = lio_set_priv_flags,
2599         .get_ts_info            = lio_get_ts_info,
2600 };
2601
2602 static const struct ethtool_ops lio_vf_ethtool_ops = {
2603         .get_link_ksettings     = lio_get_link_ksettings,
2604         .get_link               = ethtool_op_get_link,
2605         .get_drvinfo            = lio_get_vf_drvinfo,
2606         .get_ringparam          = lio_ethtool_get_ringparam,
2607         .get_channels           = lio_ethtool_get_channels,
2608         .get_strings            = lio_vf_get_strings,
2609         .get_ethtool_stats      = lio_vf_get_ethtool_stats,
2610         .get_regs_len           = lio_get_regs_len,
2611         .get_regs               = lio_get_regs,
2612         .get_msglevel           = lio_get_msglevel,
2613         .set_msglevel           = lio_vf_set_msglevel,
2614         .get_sset_count         = lio_vf_get_sset_count,
2615         .get_coalesce           = lio_get_intr_coalesce,
2616         .set_coalesce           = lio_set_intr_coalesce,
2617         .get_priv_flags         = lio_get_priv_flags,
2618         .set_priv_flags         = lio_set_priv_flags,
2619         .get_ts_info            = lio_get_ts_info,
2620 };
2621
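/* Attach the PF or VF ethtool_ops to the netdev at registration time. */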
2622 void liquidio_set_ethtool_ops(struct net_device *netdev)
2623 {
2624         struct lio *lio = GET_LIO(netdev);
2625         struct octeon_device *oct = lio->oct_dev;
2626
2627         if (OCTEON_CN23XX_VF(oct))
2628                 netdev->ethtool_ops = &lio_vf_ethtool_ops;
2629         else
2630                 netdev->ethtool_ops = &lio_ethtool_ops;
2631 }