// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include "qede.h"
#include "qede_ptp.h"

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
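/* The value is decoded by qede_config_debug() further down: bit 31 enables
 * all NOTICE prints, bit 30 all INFO prints, and bits 29-0 form a per-module
 * bitmap of VERBOSE prints.
 */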

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40	0x1634
#define CHIP_NUM_57980S_10	0x1666
#define CHIP_NUM_57980S_MF	0x1636
#define CHIP_NUM_57980S_100	0x1644
#define CHIP_NUM_57980S_50	0x1654
#define CHIP_NUM_57980S_25	0x1656
#define CHIP_NUM_57980S_IOV	0x1664
#define CHIP_NUM_AH		0x8070
#define CHIP_NUM_AH_IOV		0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI			11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);

#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
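	/* iov->configure() returns the number of VFs actually enabled; Tx
	 * switching below is therefore only toggled when the request was
	 * honored in full (rc == num_vfs_param).
	 */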

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

int __init qede_init(void)
{
	int ret;

	pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);
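	/* edev->stats is a cached snapshot; qede_get_stats64() reads it under
	 * the same stats_lock instead of querying the device directly.
	 */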

	spin_lock(&edev->stats_lock);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}

	spin_unlock(&edev->stats_lock);
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	p_common = &edev->stats.common;

	spin_lock(&edev->stats_lock);

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
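	/* Broadcasts are folded into the multicast counter here as well,
	 * which is a common convention for this rtnl_link_stats64 field.
	 */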

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;

	spin_unlock(&edev->stats_lock);
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
{
	char *p_sb = (char *)fp->sb_info->sb_virt;
	u32 sb_size, i;

	sb_size = sizeof(struct status_block);

	for (i = 0; i < sb_size; i += 8)
		DP_NOTICE(edev,
			  "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
			  p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
			  p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
}

static void
qede_txq_fp_log_metadata(struct qede_dev *edev,
			 struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_chain *p_chain = &txq->tx_pbl;

	/* Dump txq/fp/sb ids and other metadata */
	DP_NOTICE(edev,
		  "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
		  fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
		  p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);

	/* Dump all the relevant prod/cons indexes */
	DP_NOTICE(edev,
		  "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
		  le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
		  qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
}

static void
qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
{
	struct qed_sb_info_dbg sb_dbg;
	int rc;

	/* sb info */
	qede_fp_sb_dump(edev, fp);

	memset(&sb_dbg, 0, sizeof(sb_dbg));
	rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);

	DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
		  sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);

	/* report to mfw */
	edev->ops->common->mfw_report(edev->cdev,
				      "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
				      txq->index, le16_to_cpu(*txq->hw_cons_ptr),
				      qed_chain_get_cons_idx(&txq->tx_pbl),
				      qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
	if (!rc)
		edev->ops->common->mfw_report(edev->cdev,
					      "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
					      txq->index, fp->sb_info->igu_sb_id,
					      sb_dbg.igu_prod, sb_dbg.igu_cons,
					      sb_dbg.pi[TX_PI(txq->cos)]);
}

static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
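	/* Dump the state of every Tx queue first; the actual error handling
	 * is deferred to the slowpath task scheduled at the end of this
	 * function.
	 */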
	for_each_queue(i) {
		struct qede_tx_queue *txq;
		struct qede_fastpath *fp;
		int cos;

		fp = &edev->fp_array[i];
		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for_each_cos_in_txq(edev, cos) {
			txq = &fp->txq[cos];

			/* Dump basic metadata for all queues */
			qede_txq_fp_log_metadata(edev, fp, txq);

			if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
			    qed_chain_get_prod_idx(&txq->tx_pbl))
				qede_tx_log_print(edev, fp, txq);
		}
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
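		/* Tx queues are laid out TC-major: each traffic class owns
		 * QEDE_TSS_COUNT(edev) consecutive queues starting at
		 * cos * QEDE_TSS_COUNT(edev).
		 */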
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_eth_ioctl = qede_ioctl,
	.ndo_tx_timeout = qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
	.ndo_set_vf_trust = qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit = qede_xdp_transmit,
	.ndo_setup_tc = qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_features_check = qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_select_queue = qede_select_queue,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_fix_features = qede_fix_features,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
	.ndo_features_check = qede_features_check,
	.ndo_bpf = qede_xdp,
	.ndo_xdp_xmit = qede_xdp_transmit,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', if device supports it declare it's enabled.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_NDO_XMIT;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 *       'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 *       and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 *          module. VERBOSE prints are for tracking the specific flow in low
 *          level.
 *
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
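
/* For example, assuming the QED_LOG_*_MASK bits follow the b31/b30/b29-b0
 * layout described above: debug=0x40000000 selects the INFO level, while
 * debug=0x3 selects the VERBOSE level with module bits 0-1 enabled.
 */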

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	if (!edev->coal_entry) {
		edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
					   sizeof(*edev->coal_entry),
					   GFP_KERNEL);
		if (!edev->coal_entry) {
			DP_ERR(edev, "coalesce entry allocation failed\n");
			goto err;
		}
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}

static void qede_periodic_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     periodic_task.work);

	qede_fill_by_demand_stats(edev);
	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
}

static void qede_init_periodic_task(struct qede_dev *edev)
{
	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
	spin_lock_init(&edev->stats_lock);
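	/* Default cadence: refresh the cached statistics once per second */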
	edev->stats_coal_usecs = USEC_PER_SEC;
	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* Disable execution of this deferred work once
	 * qede removal is in progress; this stops any future
	 * scheduling of sp_task.
	 */
	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
		return;

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
		cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
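	/* One status block appears to be held back (presumably for the
	 * slowpath), and every remaining SB can then back one Rx queue, one
	 * XDP Tx queue and one Tx queue per traffic class, per the comments
	 * above.
	 */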

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	char buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}

		edev->devlink = qed_ops->common->devlink_register(cdev);
		if (IS_ERR(edev->devlink)) {
			DP_NOTICE(edev, "Cannot register devlink\n");
			rc = PTR_ERR(edev->devlink);
			edev->devlink = NULL;
			goto err3;
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);
		struct qed_devlink *qdl;

		edev = netdev_priv(ndev);
		qdl = devlink_priv(edev->devlink);
		qdl->cdev = cdev;
		edev->cdev = cdev;
		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);
		qede_init_periodic_task(edev);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);

	/* retain user config (for example - after recovery) */
	if (edev->stats_coal_usecs)
		schedule_delayed_work(&edev->periodic_task, 0);

	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	if (mode != QEDE_PROBE_RECOVERY)
		free_netdev(edev->ndev);
	else
		edev->cdev = NULL;
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);
		cancel_delayed_work_sync(&edev->periodic_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;

	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
		qed_ops->common->devlink_unregister(edev->devlink);
		edev->devlink = NULL;
	}

	qed_ops->common->remove(cdev);
	edev->cdev = NULL;

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY) {
		kfree(edev->coal_entry);
		free_netdev(ndev);
	}

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	struct qed_chain_init_params params = {
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = RX_RING_SIZE,
	};
	struct qed_dev *cdev = edev->cdev;
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page in multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
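		/* e.g. with a 1500-byte MTU and 4 KiB pages this typically
		 * rounds up to 2048, i.e. two buffers per page.
		 */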
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	params.mode = QED_CHAIN_MODE_NEXT_PTR;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
	params.elem_size = sizeof(struct eth_rx_bd);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	params.mode = QED_CHAIN_MODE_PBL;
	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
	params.elem_size = sizeof(union eth_rx_cqe);

	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
	if (!edev->gro_disable)
		qede_set_tpa_param(rxq);
err:
	return rc;
}

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.xdp);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct qed_chain_init_params params = {
		.mode = QED_CHAIN_MODE_PBL,
		.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type = QED_CHAIN_CNT_TYPE_U16,
		.num_elems = edev->q_num_tx_buffers,
		.elem_size = sizeof(union eth_tx_bd_types),
	};
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.xdp)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info, fp->id);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_XDP)
		qede_free_mem_txq(edev, fp->xdp_tx);

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos)
			qede_free_mem_txq(edev, &fp->txq[cos]);
	}
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains a status block, one rx queue and/or multiple per-TC tx
 * queues).
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		int cos;

		for_each_cos_in_txq(edev, cos) {
			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
			if (rc)
				goto out;
		}
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}

static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}

static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}

/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;
	bool init_xdp = false;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;

			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
			init_xdp = true;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id, 0) < 0);

			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
						       MEM_TYPE_PAGE_ORDER0,
						       NULL)) {
				DP_NOTICE(edev,
					  "Failed to register XDP memory model\n");
			}
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	if (init_xdp) {
		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
	}
}

static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
		napi_enable(&edev->fp_array[i].napi);
	}
}

static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
	edev->int_info.msix_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
#ifdef CONFIG_RFS_ACCEL
			if (edev->ndev->rx_cpu_rmap)
				free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);

			edev->ndev->rx_cpu_rmap = NULL;
#endif
			qede_sync_free_irqs(edev);
			return rc;
		}

		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}

	return 0;
}

static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
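		/* Poll for up to ~1000 iterations of 1-2 ms each; on timeout,
		 * ask the management FW (MCP) to drain the queue once and
		 * retry with allow_drain=false, so a second timeout fails
		 * with -ENODEV instead of looping forever.
		 */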
2056 "Tx queue[%d] is stuck, requesting MCP to drain\n",
2058 rc = edev->ops->common->drain(edev->cdev);
2061 return qede_drain_txq(edev, txq, false);
2064 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
2065 txq->index, txq->sw_tx_prod,
2070 usleep_range(1000, 2000);
2074 /* FW finished processing, wait for HW to transmit all tx packets */
2075 usleep_range(1000, 2000);

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular txq.
	 * We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}

static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
2252 start.gro_enable = !edev->gro_disable;
2253 start.mtu = edev->ndev->mtu;
2255 start.drop_ttl0 = true;
2256 start.remove_inner_vlan = vlan_removal_en;
2257 start.clear_stats = clear_stats;
2259 rc = edev->ops->vport_start(cdev, &start);
2262 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
2266 DP_VERBOSE(edev, NETIF_MSG_IFUP,
2267 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2268 start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
2271 struct qede_fastpath *fp = &edev->fp_array[i];
2272 dma_addr_t p_phys_table;
2275 if (fp->type & QEDE_FASTPATH_RX) {
2276 struct qed_rxq_start_ret_params ret_params;
2277 struct qede_rx_queue *rxq = fp->rxq;
2280 memset(&ret_params, 0, sizeof(ret_params));
2281 memset(&q_params, 0, sizeof(q_params));
2282 q_params.queue_id = rxq->rxq_id;
2283 q_params.vport_id = 0;
2284 q_params.p_sb = fp->sb_info;
2285 q_params.sb_idx = RX_PI;
2288 qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
2289 page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
2291 rc = edev->ops->q_rx_start(cdev, i, &q_params,
2293 rxq->rx_bd_ring.p_phys_addr,
2295 page_cnt, &ret_params);
2297 DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
2302 /* Use the return parameters */
2303 rxq->hw_rxq_prod_addr = ret_params.p_prod;
2304 rxq->handle = ret_params.p_handle;
2306 val = &fp->sb_info->sb_virt->pi_array[RX_PI];
2307 rxq->hw_cons_ptr = val;
2309 qede_update_rx_prod(edev, rxq);
2312 if (fp->type & QEDE_FASTPATH_XDP) {
2313 rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2317 bpf_prog_add(edev->xdp_prog, 1);
2318 fp->rxq->xdp_prog = edev->xdp_prog;
2321 if (fp->type & QEDE_FASTPATH_TX) {
2324 for_each_cos_in_txq(edev, cos) {
2325 rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
2333 /* Prepare and send the vport enable */
2334 vport_update_params->vport_id = start.vport_id;
2335 vport_update_params->update_vport_active_flg = 1;
2336 vport_update_params->vport_active_flg = 1;
2338 if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2339 qed_info->tx_switching) {
2340 vport_update_params->update_tx_switching_flg = 1;
2341 vport_update_params->tx_switching_flg = 1;
2344 qede_fill_rss_params(edev, &vport_update_params->rss_params,
2345 &vport_update_params->update_rss_flg);
2347 rc = edev->ops->vport_update(cdev, vport_update_params);
2349 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2352 vfree(vport_update_params);
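/* Interface teardown. QEDE_UNLOAD_RECOVERY skips the steps that need a
 * functional device (link reset and the queue-stop ramrods) and instead
 * just empties the Tx rings, since in recovery the hardware context is
 * about to be thrown away and re-created anyway.
 */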
enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
	QEDE_UNLOAD_RECOVERY,
};
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	if (mode != QEDE_UNLOAD_RECOVERY)
		edev->state = QEDE_STATE_CLOSED;

	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	if (mode != QEDE_UNLOAD_RECOVERY) {
		/* Reset the link */
		memset(&link_params, 0, sizeof(link_params));
		link_params.link_up = false;
		edev->ops->common->set_link(edev->cdev, &link_params);

		rc = qede_stop_queues(edev);
		if (rc) {
#ifdef CONFIG_RFS_ACCEL
			if (edev->dev_info.common.b_arfs_capable) {
				qede_poll_for_freeing_arfs_filters(edev);
				if (edev->ndev->rx_cpu_rmap)
					free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
				edev->ndev->rx_cpu_rmap = NULL;
			}
#endif
			qede_sync_free_irqs(edev);
			goto out;
		}

		DP_INFO(edev, "Stopped Queues\n");
	}

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (edev->dev_info.common.b_arfs_capable) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	if (mode == QEDE_UNLOAD_RECOVERY)
		qede_empty_tx_queues(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	if (mode != QEDE_UNLOAD_RECOVERY)
		DP_NOTICE(edev, "Link is down\n");

	edev->ptp_skip_txts = 0;

	DP_INFO(edev, "Ending qede unload\n");
}
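/* Interface bring-up. The error path below unwinds in strict reverse
 * order of the setup steps (IRQs, NAPI, queue memory, fastpath
 * interrupt config, fastpath array), so each failing step only needs to
 * jump to its matching label.
 */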
enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
	QEDE_LOAD_RECOVERY,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	struct ethtool_coalesce coal = {};
	u8 num_tc;
	int rc, i;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	if (qede_alloc_arfs(edev)) {
		edev->ndev->features &= ~NETIF_F_NTUPLE;
		edev->dev_info.common.b_arfs_capable = false;
	}

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	num_tc = netdev_get_num_tc(edev->ndev);
	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
	qede_setup_tc(edev->ndev, num_tc);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;

	for_each_queue(i) {
		if (edev->coal_entry[i].isvalid) {
			coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
			coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
		}
		__qede_unlock(edev);
		qede_set_per_coalesce(edev->ndev, i, &coal);
		__qede_lock(edev);
	}
	DP_INFO(edev, "Ending successfully qede load\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}
/* 'func' should be able to run between unload and reload assuming interface
 * is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, internal state wouldn't change even
	 * if netdev state would start transitioning. Check whether current
	 * internal configuration indicates device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}
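/* A minimal usage sketch (hypothetical caller, modelled on how
 * qede_change_mtu() in this driver uses the API): fill a
 * qede_reload_args with a callback that applies the new configuration
 * while the datapath is down, e.g.
 *
 *	struct qede_reload_args args;
 *
 *	args.u.mtu = new_mtu;
 *	args.func = &qede_update_mtu;	// copies args->u.mtu to the netdev
 *	qede_reload(edev, &args, false);
 */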
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);
	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_nic_reset_ntf(ndev);
	edev->ops->common->update_drv_state(edev->cdev, true);
	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
	if (edev->cdev)
		edev->ops->common->update_drv_state(edev->cdev, false);
	return 0;
}
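/* Link-change notification from the qed core. Notifications that arrive
 * before the interface has actually requested the link (i.e. before
 * qede_load() sets QEDE_FLAGS_LINK_REQUESTED) are ignored.
 */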
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}
static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling a recovery handling since already in recovery state\n");
		return;
	}

	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
	DP_INFO(edev, "Scheduled a recovery handler\n");
}
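/* Process-level recovery: tear the device down (qede_unload and
 * __qede_remove in recovery mode), then re-probe and re-load it. If any
 * step fails, the netdev is detached and the function is powered down,
 * since there is no further automatic way back.
 */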
static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
	netif_device_detach(edev->ndev);
	if (edev->cdev)
		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}
static void qede_recovery_handler(struct qede_dev *edev)
{
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

	/* No need to acquire the qede_lock first, since qede_sp_task does so
	 * before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;
	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}

	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		qede_config_rx_mode(edev->ndev);
		udp_tunnel_nic_reset_ntf(edev->ndev);
	}

	edev->state = curr_state;
	DP_NOTICE(edev, "Recovery handling is done\n");
	return;

err:
	qede_recovery_failed(edev);
}
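/* HW error handling is split in two: the atomic part below runs
 * directly from the notification context and only does what is safe
 * there (a WARN for a call trace, masking further attentions), while
 * the sleepable part runs later from the sp_task workqueue and may
 * report the fatal error to devlink.
 */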
static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	if (edev->devlink) {
		DP_NOTICE(edev, "Reporting fatal error to devlink\n");
		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
	}

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}
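/* Translate a qed error type into the QEDE_ERR_* action bits consumed
 * by the handlers above: DMAE failures additionally warn for a call
 * trace, and all recoverable types mask further attentions, collect
 * debug data and mark the error for recovery.
 */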
static void qede_set_hw_err_flags(struct qede_dev *edev,
				  enum qed_hw_err_type err_type)
{
	unsigned long err_flags = 0;

	switch (err_type) {
	case QED_HW_ERR_DMAE_FAIL:
		set_bit(QEDE_ERR_WARN, &err_flags);
		fallthrough;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_RAMROD_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
		/* Mark this error as recoverable and start recovery */
		set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
		break;
	default:
		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
		break;
	}

	edev->err_flags |= err_flags;
}
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

	/* Fan failure cannot be masked by handling of another HW error or by a
	 * concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	    err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling an error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}

	edev->last_err_type = err_type;
	qede_set_hw_err_flags(edev, err_type);
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
}
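/* The remaining callbacks feed the management firmware's TLV requests
 * with a snapshot of the device state: offload features, MAC addresses
 * and per-queue fill levels.
 */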
static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
	if (netif_xmit_stopped(netdev_txq))
		return true;
	return false;
}
static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;
	struct netdev_hw_addr *ha;
	int i;

	if (edev->ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (edev->ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
	eth_zero_addr(data->mac[1]);
	eth_zero_addr(data->mac[2]);
	/* Copy the first two UC macs */
	netif_addr_lock_bh(edev->ndev);
	i = 1;
	netdev_for_each_uc_addr(ha, edev->ndev) {
		ether_addr_copy(data->mac[i++], ha->addr);
		if (i == QED_TLV_MAC_COUNT)
			break;
	}

	netif_addr_unlock_bh(edev->ndev);
}
static void qede_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct qede_dev *edev = dev;
	struct qede_fastpath *fp;
	int i;

	etlv->lso_maxoff_size = 0xFFFF;
	etlv->lso_maxoff_size_set = true;
	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
	etlv->lso_minseg_size_set = true;
	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;
	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
	etlv->tx_descr_size_set = true;
	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
	etlv->rx_descr_size_set = true;
	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
	etlv->iov_offload_set = true;

	/* Fill information regarding queues; Should be done under the qede
	 * lock to guarantee those don't change beneath our feet.
	 */
	etlv->txqs_empty = true;
	etlv->rxqs_empty = true;
	etlv->num_txqs_full = 0;
	etlv->num_rxqs_full = 0;

	__qede_lock(edev);
	for_each_queue(i) {
		fp = &edev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);

			if (txq->sw_tx_cons != txq->sw_tx_prod)
				etlv->txqs_empty = false;
			if (qede_is_txq_full(edev, txq))
				etlv->num_txqs_full++;
		}
		if (fp->type & QEDE_FASTPATH_RX) {
			if (qede_has_rx_work(fp->rxq))
				etlv->rxqs_empty = false;

			/* This one is a bit tricky; Firmware might stop
			 * placing packets if ring is not yet full.
			 * Give an approximation.
			 */
			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
			    RX_RING_SIZE - 100)
				etlv->num_rxqs_full++;
		}
	}
	__qede_unlock(edev);

	etlv->txqs_empty_set = true;
	etlv->rxqs_empty_set = true;
	etlv->num_txqs_full_set = true;
	etlv->num_rxqs_full_set = true;
}
/**
 * qede_io_error_detected(): Called when a PCI error is detected
 *
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * Return: pci_ers_result_t.
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;

	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}

	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}
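/* The actual AER recovery runs later from qede_sp_task(), which sees
 * QEDE_SP_AER set and kicks off the qed core's recovery process; this
 * callback only quiesces Tx and defers the heavy lifting.
 */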