1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #ifdef CONFIG_RFS_ACCEL
8 #include <linux/cpu_rmap.h>
10 #include <linux/if_vlan.h>
12 #include <linux/ipv6.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/aer.h>
16 #include <linux/skbuff.h>
17 #include <linux/sctp.h>
18 #include <linux/vermagic.h>
20 #include <net/ip6_checksum.h>
21 #include <net/pkt_cls.h>
23 #include <net/vxlan.h>
26 #include "hns3_enet.h"
28 #define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
29 #define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
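/* hns3_set_field() ORs @val into @origin at bit offset @shift;
 * hns3_tx_bd_count() returns how many TX buffer descriptors are needed to
 * carry a buffer of size S, given the per-BD limit HNS3_MAX_BD_SIZE.
 */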
31 static void hns3_clear_all_ring(struct hnae3_handle *h);
32 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
33 static void hns3_remove_hw_addr(struct net_device *netdev);
35 static const char hns3_driver_name[] = "hns3";
36 const char hns3_driver_version[] = VERMAGIC_STRING;
37 static const char hns3_driver_string[] =
38 "Hisilicon Ethernet Network Driver for Hip08 Family";
39 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
40 static struct hnae3_client client;
42 static int debug = -1;
43 module_param(debug, int, 0);
44 MODULE_PARM_DESC(debug, " Network interface message level setting");
46 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
47 NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
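/* debug = -1 requests the default message level; it is presumably resolved
 * against DEFAULT_MSG_LEVEL (e.g. via netif_msg_init()) when the client
 * initializes the netdev private data.
 */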
49 /* hns3_pci_tbl - PCI Device ID Table
51 * Last entry must be all 0s
53 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
54 * Class, Class Mask, private data (not used) }
56 static const struct pci_device_id hns3_pci_tbl[] = {
57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
59 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
60 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
61 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
62 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
63 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
64 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
65 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
66 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
67 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
68 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
69 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
70 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
71 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
72 /* required last entry */
75 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
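/* Per-vector interrupt handler: it only schedules the vector's NAPI context;
 * the actual TX completion and RX processing happen later in the NAPI poll
 * callback.
 */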
77 static irqreturn_t hns3_irq_handle(int irq, void *vector)
79 struct hns3_enet_tqp_vector *tqp_vector = vector;
81 napi_schedule_irqoff(&tqp_vector->napi);
86 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
88 struct hns3_enet_tqp_vector *tqp_vectors;
91 for (i = 0; i < priv->vector_num; i++) {
92 tqp_vectors = &priv->tqp_vector[i];
94 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
97 /* clear the affinity hint */
98 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
100 /* release the irq resource */
101 free_irq(tqp_vectors->vector_irq, tqp_vectors);
102 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
106 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
108 struct hns3_enet_tqp_vector *tqp_vectors;
109 int txrx_int_idx = 0;
115 for (i = 0; i < priv->vector_num; i++) {
116 tqp_vectors = &priv->tqp_vector[i];
118 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
121 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
122 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
123 "%s-%s-%d", priv->netdev->name, "TxRx",
126 } else if (tqp_vectors->rx_group.ring) {
127 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
128 "%s-%s-%d", priv->netdev->name, "Rx",
130 } else if (tqp_vectors->tx_group.ring) {
131 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
132 "%s-%s-%d", priv->netdev->name, "Tx",
135 /* Skip this unused q_vector */
139 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
141 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
145 netdev_err(priv->netdev, "request irq(%d) fail\n",
146 tqp_vectors->vector_irq);
150 irq_set_affinity_hint(tqp_vectors->vector_irq,
151 &tqp_vectors->affinity_mask);
153 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
159 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
162 writel(mask_en, tqp_vector->mask_addr);
165 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
167 napi_enable(&tqp_vector->napi);
170 hns3_mask_vector_irq(tqp_vector, 1);
173 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
176 hns3_mask_vector_irq(tqp_vector, 0);
178 disable_irq(tqp_vector->vector_irq);
179 napi_disable(&tqp_vector->napi);
182 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
185 u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
187 /* this defines the configuration for RL (Interrupt Rate Limiter).
188 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
189 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
192 if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
193 !tqp_vector->rx_group.coal.gl_adapt_enable)
194 /* According to the hardware, the range of rl_reg is
195 * 0-59 and the unit is 4.
197 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
199 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
202 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
205 u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
207 writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
210 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
213 u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
215 writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
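/* GL (Gap Limiter) and RL (Rate Limiter) are the two coalescing knobs written
 * above. The defaults below enable self-adaptive GL for both the TX and RX
 * groups, start both at the HNS3_INT_GL_50K level, and program RL from
 * kinfo.int_rl_setting when the hardware is initialized.
 */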
218 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
219 struct hns3_nic_priv *priv)
221 /* initialize the configuration for interrupt coalescing.
222 * 1. GL (Interrupt Gap Limiter)
223 * 2. RL (Interrupt Rate Limiter)
226 /* Default: enable interrupt coalescing self-adaptive and GL */
227 tqp_vector->tx_group.coal.gl_adapt_enable = 1;
228 tqp_vector->rx_group.coal.gl_adapt_enable = 1;
230 tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
231 tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
233 tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
234 tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
237 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
238 struct hns3_nic_priv *priv)
240 struct hnae3_handle *h = priv->ae_handle;
242 hns3_set_vector_coalesce_tx_gl(tqp_vector,
243 tqp_vector->tx_group.coal.int_gl);
244 hns3_set_vector_coalesce_rx_gl(tqp_vector,
245 tqp_vector->rx_group.coal.int_gl);
246 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
249 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
251 struct hnae3_handle *h = hns3_get_handle(netdev);
252 struct hnae3_knic_private_info *kinfo = &h->kinfo;
253 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
256 if (kinfo->num_tc <= 1) {
257 netdev_reset_tc(netdev);
259 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
262 "netdev_set_num_tc fail, ret=%d!\n", ret);
266 for (i = 0; i < HNAE3_MAX_TC; i++) {
267 if (!kinfo->tc_info[i].enable)
270 netdev_set_tc_queue(netdev,
271 kinfo->tc_info[i].tc,
272 kinfo->tc_info[i].tqp_count,
273 kinfo->tc_info[i].tqp_offset);
277 ret = netif_set_real_num_tx_queues(netdev, queue_size);
280 "netif_set_real_num_tx_queues fail, ret=%d!\n",
285 ret = netif_set_real_num_rx_queues(netdev, queue_size);
288 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
295 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
297 u16 alloc_tqps, max_rss_size, rss_size;
299 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
300 rss_size = alloc_tqps / h->kinfo.num_tc;
302 return min_t(u16, rss_size, max_rss_size);
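/* Each TQP (task queue pair) has an enable bit in its RING_EN register;
 * the two helpers below simply set or clear that bit.
 */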
305 static void hns3_tqp_enable(struct hnae3_queue *tqp)
309 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
310 rcb_reg |= BIT(HNS3_RING_EN_B);
311 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
314 static void hns3_tqp_disable(struct hnae3_queue *tqp)
318 rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
319 rcb_reg &= ~BIT(HNS3_RING_EN_B);
320 hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
323 static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
325 #ifdef CONFIG_RFS_ACCEL
326 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
327 netdev->rx_cpu_rmap = NULL;
331 static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
333 #ifdef CONFIG_RFS_ACCEL
334 struct hns3_nic_priv *priv = netdev_priv(netdev);
335 struct hns3_enet_tqp_vector *tqp_vector;
338 if (!netdev->rx_cpu_rmap) {
339 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
340 if (!netdev->rx_cpu_rmap)
344 for (i = 0; i < priv->vector_num; i++) {
345 tqp_vector = &priv->tqp_vector[i];
346 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
347 tqp_vector->vector_irq);
349 hns3_free_rx_cpu_rmap(netdev);
357 static int hns3_nic_net_up(struct net_device *netdev)
359 struct hns3_nic_priv *priv = netdev_priv(netdev);
360 struct hnae3_handle *h = priv->ae_handle;
364 ret = hns3_nic_reset_all_ring(h);
368 /* the device can work without cpu rmap, only aRFS needs it */
369 ret = hns3_set_rx_cpu_rmap(netdev);
371 netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
373 /* get irq resource for all vectors */
374 ret = hns3_nic_init_irq(priv);
376 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
380 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
382 /* enable the vectors */
383 for (i = 0; i < priv->vector_num; i++)
384 hns3_vector_enable(&priv->tqp_vector[i]);
387 for (j = 0; j < h->kinfo.num_tqps; j++)
388 hns3_tqp_enable(h->kinfo.tqp[j]);
390 /* start the ae_dev */
391 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
398 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
400 hns3_tqp_disable(h->kinfo.tqp[j]);
402 for (j = i - 1; j >= 0; j--)
403 hns3_vector_disable(&priv->tqp_vector[j]);
405 hns3_nic_uninit_irq(priv);
407 hns3_free_rx_cpu_rmap(netdev);
411 static void hns3_config_xps(struct hns3_nic_priv *priv)
415 for (i = 0; i < priv->vector_num; i++) {
416 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
417 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
422 ret = netif_set_xps_queue(priv->netdev,
423 &tqp_vector->affinity_mask,
424 ring->tqp->tqp_index);
426 netdev_warn(priv->netdev,
427 "set xps queue failed: %d", ret);
434 static int hns3_nic_net_open(struct net_device *netdev)
436 struct hns3_nic_priv *priv = netdev_priv(netdev);
437 struct hnae3_handle *h = hns3_get_handle(netdev);
438 struct hnae3_knic_private_info *kinfo;
441 if (hns3_nic_resetting(netdev))
444 netif_carrier_off(netdev);
446 ret = hns3_nic_set_real_num_queue(netdev);
450 ret = hns3_nic_net_up(netdev);
453 "hns net up fail, ret=%d!\n", ret);
458 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
459 netdev_set_prio_tc_map(netdev, i,
463 if (h->ae_algo->ops->set_timer_task)
464 h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
466 hns3_config_xps(priv);
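/* Teardown mirrors the bring-up order: disable the per-vector interrupts and
 * NAPI, disable every TQP, stop the ae_dev, release the aRFS cpu_rmap and IRQ
 * resources, and finally clear the rings.
 */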
470 static void hns3_nic_net_down(struct net_device *netdev)
472 struct hns3_nic_priv *priv = netdev_priv(netdev);
473 struct hnae3_handle *h = hns3_get_handle(netdev);
474 const struct hnae3_ae_ops *ops;
477 /* disable vectors */
478 for (i = 0; i < priv->vector_num; i++)
479 hns3_vector_disable(&priv->tqp_vector[i]);
482 for (i = 0; i < h->kinfo.num_tqps; i++)
483 hns3_tqp_disable(h->kinfo.tqp[i]);
486 ops = priv->ae_handle->ae_algo->ops;
488 ops->stop(priv->ae_handle);
490 hns3_free_rx_cpu_rmap(netdev);
492 /* free irq resources */
493 hns3_nic_uninit_irq(priv);
495 hns3_clear_all_ring(priv->ae_handle);
498 static int hns3_nic_net_stop(struct net_device *netdev)
500 struct hns3_nic_priv *priv = netdev_priv(netdev);
501 struct hnae3_handle *h = hns3_get_handle(netdev);
503 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
506 if (h->ae_algo->ops->set_timer_task)
507 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
509 netif_tx_stop_all_queues(netdev);
510 netif_carrier_off(netdev);
512 hns3_nic_net_down(netdev);
517 static int hns3_nic_uc_sync(struct net_device *netdev,
518 const unsigned char *addr)
520 struct hnae3_handle *h = hns3_get_handle(netdev);
522 if (h->ae_algo->ops->add_uc_addr)
523 return h->ae_algo->ops->add_uc_addr(h, addr);
528 static int hns3_nic_uc_unsync(struct net_device *netdev,
529 const unsigned char *addr)
531 struct hnae3_handle *h = hns3_get_handle(netdev);
533 if (h->ae_algo->ops->rm_uc_addr)
534 return h->ae_algo->ops->rm_uc_addr(h, addr);
539 static int hns3_nic_mc_sync(struct net_device *netdev,
540 const unsigned char *addr)
542 struct hnae3_handle *h = hns3_get_handle(netdev);
544 if (h->ae_algo->ops->add_mc_addr)
545 return h->ae_algo->ops->add_mc_addr(h, addr);
550 static int hns3_nic_mc_unsync(struct net_device *netdev,
551 const unsigned char *addr)
553 struct hnae3_handle *h = hns3_get_handle(netdev);
555 if (h->ae_algo->ops->rm_mc_addr)
556 return h->ae_algo->ops->rm_mc_addr(h, addr);
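/* Translate netdev flags into HNAE3 flags: IFF_PROMISC requests unicast,
 * multicast and broadcast promiscuous modes (HNAE3_USER_UPE/MPE and
 * HNAE3_BPE) and drops the VLAN-filter request, while IFF_ALLMULTI only
 * requests multicast promiscuous mode.
 */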
561 static u8 hns3_get_netdev_flags(struct net_device *netdev)
565 if (netdev->flags & IFF_PROMISC) {
566 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
568 flags |= HNAE3_VLAN_FLTR;
569 if (netdev->flags & IFF_ALLMULTI)
570 flags |= HNAE3_USER_MPE;
576 static void hns3_nic_set_rx_mode(struct net_device *netdev)
578 struct hnae3_handle *h = hns3_get_handle(netdev);
582 new_flags = hns3_get_netdev_flags(netdev);
584 ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
586 netdev_err(netdev, "sync uc address fail\n");
588 new_flags |= HNAE3_OVERFLOW_UPE;
591 if (netdev->flags & IFF_MULTICAST) {
592 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
595 netdev_err(netdev, "sync mc address fail\n");
597 new_flags |= HNAE3_OVERFLOW_MPE;
601 /* When user-mode promiscuous mode is enabled, VLAN filtering is disabled
602 * to let all packets in. When only MAC-VLAN table overflow promiscuous
603 * mode is enabled, VLAN filtering stays enabled.
605 hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
606 h->netdev_flags = new_flags;
607 hns3_update_promisc_mode(netdev, new_flags);
610 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
612 struct hns3_nic_priv *priv = netdev_priv(netdev);
613 struct hnae3_handle *h = priv->ae_handle;
615 if (h->ae_algo->ops->set_promisc_mode) {
616 return h->ae_algo->ops->set_promisc_mode(h,
617 promisc_flags & HNAE3_UPE,
618 promisc_flags & HNAE3_MPE);
624 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
626 struct hns3_nic_priv *priv = netdev_priv(netdev);
627 struct hnae3_handle *h = priv->ae_handle;
630 if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
631 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
632 if (enable != last_state) {
635 enable ? "enable" : "disable");
636 h->ae_algo->ops->enable_vlan_filter(h, enable);
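/* hns3_set_tso() prepares the TSO-related TX descriptor fields: it clears the
 * IPv4 (and, for some tunnel GSO types, the outer UDP) checksum fields,
 * removes the payload length from the inner TCP pseudo checksum, stores the
 * header-less payload length and MSS, and sets the TSO bit.
 */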
641 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
642 u16 *mss, u32 *type_cs_vlan_tso)
644 u32 l4_offset, hdr_len;
645 union l3_hdr_info l3;
646 union l4_hdr_info l4;
650 if (!skb_is_gso(skb))
653 ret = skb_cow_head(skb, 0);
657 l3.hdr = skb_network_header(skb);
658 l4.hdr = skb_transport_header(skb);
660 /* Software should clear the IPv4 checksum field when TSO is needed. */
663 if (l3.v4->version == 4)
667 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
670 SKB_GSO_UDP_TUNNEL_CSUM)) {
671 if ((!(skb_shinfo(skb)->gso_type &
673 (skb_shinfo(skb)->gso_type &
674 SKB_GSO_UDP_TUNNEL_CSUM)) {
675 /* Software should clear the udp's checksum
676 * field when tso is needed.
680 /* reset l3&l4 pointers from outer to inner headers */
681 l3.hdr = skb_inner_network_header(skb);
682 l4.hdr = skb_inner_transport_header(skb);
684 /* Software should clear the IPv4 checksum field when TSO is needed. */
687 if (l3.v4->version == 4)
691 /* normal or tunnel packet */
692 l4_offset = l4.hdr - skb->data;
693 hdr_len = (l4.tcp->doff << 2) + l4_offset;
695 /* remove payload length from inner pseudo checksum when TSO */
696 l4_paylen = skb->len - l4_offset;
697 csum_replace_by_diff(&l4.tcp->check,
698 (__force __wsum)htonl(l4_paylen));
700 /* find the txbd field values */
701 *paylen = skb->len - hdr_len;
702 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
704 /* get MSS for TSO */
705 *mss = skb_shinfo(skb)->gso_size;
710 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
713 union l3_hdr_info l3;
714 unsigned char *l4_hdr;
715 unsigned char *exthdr;
719 /* find outer header point */
720 l3.hdr = skb_network_header(skb);
721 l4_hdr = skb_transport_header(skb);
723 if (skb->protocol == htons(ETH_P_IPV6)) {
724 exthdr = l3.hdr + sizeof(*l3.v6);
725 l4_proto_tmp = l3.v6->nexthdr;
726 if (l4_hdr != exthdr)
727 ipv6_skip_exthdr(skb, exthdr - skb->data,
728 &l4_proto_tmp, &frag_off);
729 } else if (skb->protocol == htons(ETH_P_IP)) {
730 l4_proto_tmp = l3.v4->protocol;
735 *ol4_proto = l4_proto_tmp;
738 if (!skb->encapsulation) {
743 /* find inner header point */
744 l3.hdr = skb_inner_network_header(skb);
745 l4_hdr = skb_inner_transport_header(skb);
747 if (l3.v6->version == 6) {
748 exthdr = l3.hdr + sizeof(*l3.v6);
749 l4_proto_tmp = l3.v6->nexthdr;
750 if (l4_hdr != exthdr)
751 ipv6_skip_exthdr(skb, exthdr - skb->data,
752 &l4_proto_tmp, &frag_off);
753 } else if (l3.v4->version == 4) {
754 l4_proto_tmp = l3.v4->protocol;
757 *il4_proto = l4_proto_tmp;
762 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
763 * and it is a UDP packet whose dest port is the IANA-assigned VXLAN
764 * port (4789), the hardware is expected to do the checksum offload,
765 * but it will not, so the driver falls back to software checksumming. */
768 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
770 union l4_hdr_info l4;
772 l4.hdr = skb_transport_header(skb);
774 if (!(!skb->encapsulation &&
775 l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
778 skb_checksum_help(skb);
783 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
784 u32 *ol_type_vlan_len_msec)
786 u32 l2_len, l3_len, l4_len;
787 unsigned char *il2_hdr;
788 union l3_hdr_info l3;
789 union l4_hdr_info l4;
791 l3.hdr = skb_network_header(skb);
792 l4.hdr = skb_transport_header(skb);
794 /* compute OL2 header size, defined in 2 Bytes */
795 l2_len = l3.hdr - skb->data;
796 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
798 /* compute OL3 header size, defined in 4 Bytes */
799 l3_len = l4.hdr - l3.hdr;
800 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
802 il2_hdr = skb_inner_mac_header(skb);
803 /* compute OL4 header size, defined in 4 Bytes. */
804 l4_len = il2_hdr - l4.hdr;
805 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
807 /* define outer network header type */
808 if (skb->protocol == htons(ETH_P_IP)) {
810 hns3_set_field(*ol_type_vlan_len_msec,
812 HNS3_OL3T_IPV4_CSUM);
814 hns3_set_field(*ol_type_vlan_len_msec,
816 HNS3_OL3T_IPV4_NO_CSUM);
818 } else if (skb->protocol == htons(ETH_P_IPV6)) {
819 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
823 if (ol4_proto == IPPROTO_UDP)
824 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
825 HNS3_TUN_MAC_IN_UDP);
826 else if (ol4_proto == IPPROTO_GRE)
827 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
831 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
832 u8 il4_proto, u32 *type_cs_vlan_tso,
833 u32 *ol_type_vlan_len_msec)
835 unsigned char *l2_hdr = skb->data;
836 u32 l4_proto = ol4_proto;
837 union l4_hdr_info l4;
838 union l3_hdr_info l3;
841 l4.hdr = skb_transport_header(skb);
842 l3.hdr = skb_network_header(skb);
844 /* handle encapsulation skb */
845 if (skb->encapsulation) {
846 /* If this is a not UDP/GRE encapsulation skb */
847 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
848 /* drop the tunnel packet if the hardware doesn't support it,
849 * because the hardware can't calculate the csum when doing TSO.
854 /* the stack has already computed the IP header, so the
855 * driver calculates the L4 checksum when not doing TSO.
857 skb_checksum_help(skb);
861 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
863 /* switch to inner header */
864 l2_hdr = skb_inner_mac_header(skb);
865 l3.hdr = skb_inner_network_header(skb);
866 l4.hdr = skb_inner_transport_header(skb);
867 l4_proto = il4_proto;
870 if (l3.v4->version == 4) {
871 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
874 /* the stack computes the IP header already, the only time we
875 * need the hardware to recompute it is in the case of TSO.
878 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
879 } else if (l3.v6->version == 6) {
880 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
884 /* compute inner(/normal) L2 header size, defined in 2 Bytes */
885 l2_len = l3.hdr - l2_hdr;
886 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
888 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
889 l3_len = l4.hdr - l3.hdr;
890 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
892 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
895 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
896 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
898 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
902 if (hns3_tunnel_csum_bug(skb))
905 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
906 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
908 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
909 (sizeof(struct udphdr) >> 2));
912 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
913 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
915 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
916 (sizeof(struct sctphdr) >> 2));
919 /* drop the tunnel packet if the hardware doesn't support it,
920 * because the hardware can't calculate the csum when doing TSO.
925 /* the stack has already computed the IP header, so the
926 * driver calculates the L4 checksum when not doing TSO.
928 skb_checksum_help(skb);
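/* The TX BD base info written below carries at least the frag-end (FE) bit,
 * set on the last BD of a packet, and the valid (VLD) bit that hands the
 * descriptor to hardware.
 */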
935 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
937 /* Config bd buffer end */
938 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
939 hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
942 static int hns3_fill_desc_vtags(struct sk_buff *skb,
943 struct hns3_enet_ring *tx_ring,
944 u32 *inner_vlan_flag,
949 #define HNS3_TX_VLAN_PRIO_SHIFT 13
951 struct hnae3_handle *handle = tx_ring->tqp->handle;
953 /* Due to a HW limitation, when port-based VLAN insertion is enabled, only
954 * one VLAN header is allowed in the skb; otherwise it causes a RAS error.
956 if (unlikely(skb_vlan_tagged_multi(skb) &&
957 handle->port_base_vlan_state ==
958 HNAE3_PORT_BASE_VLAN_ENABLE))
961 if (skb->protocol == htons(ETH_P_8021Q) &&
962 !(tx_ring->tqp->handle->kinfo.netdev->features &
963 NETIF_F_HW_VLAN_CTAG_TX)) {
964 /* When HW VLAN acceleration is turned off and the stack
965 * sets the protocol to 802.1q, the driver just needs to
966 * set the protocol to the encapsulated ethertype.
968 skb->protocol = vlan_get_protocol(skb);
972 if (skb_vlan_tag_present(skb)) {
975 vlan_tag = skb_vlan_tag_get(skb);
976 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
978 /* Based on the HW strategy, use out_vtag in the double-tag case
979 * and inner_vtag in the single-tag case.
981 if (skb->protocol == htons(ETH_P_8021Q)) {
982 if (handle->port_base_vlan_state ==
983 HNAE3_PORT_BASE_VLAN_DISABLE){
984 hns3_set_field(*out_vlan_flag,
985 HNS3_TXD_OVLAN_B, 1);
986 *out_vtag = vlan_tag;
988 hns3_set_field(*inner_vlan_flag,
990 *inner_vtag = vlan_tag;
993 hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
994 *inner_vtag = vlan_tag;
996 } else if (skb->protocol == htons(ETH_P_8021Q)) {
997 struct vlan_ethhdr *vhdr;
1000 rc = skb_cow_head(skb, 0);
1001 if (unlikely(rc < 0))
1003 vhdr = (struct vlan_ethhdr *)skb->data;
1004 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
1005 << HNS3_TX_VLAN_PRIO_SHIFT);
1008 skb->protocol = vlan_get_protocol(skb);
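/* hns3_fill_desc() fills one or more TX BDs for a buffer: for the skb head it
 * also resolves the VLAN tags and checksum/TSO fields, then DMA-maps the
 * buffer and, when the buffer exceeds HNS3_MAX_BD_SIZE, splits it across
 * several BDs.
 */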
1012 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1013 int size, int frag_end, enum hns_desc_type type)
1015 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1016 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1017 struct device *dev = ring_to_dev(ring);
1018 struct skb_frag_struct *frag;
1019 unsigned int frag_buf_num;
1023 if (type == DESC_TYPE_SKB) {
1024 struct sk_buff *skb = (struct sk_buff *)priv;
1025 u32 ol_type_vlan_len_msec = 0;
1026 u32 type_cs_vlan_tso = 0;
1027 u32 paylen = skb->len;
1033 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
1034 &ol_type_vlan_len_msec,
1035 &inner_vtag, &out_vtag);
1039 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1040 u8 ol4_proto, il4_proto;
1042 skb_reset_mac_len(skb);
1044 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1048 ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1050 &ol_type_vlan_len_msec);
1054 ret = hns3_set_tso(skb, &paylen, &mss,
1061 desc->tx.ol_type_vlan_len_msec =
1062 cpu_to_le32(ol_type_vlan_len_msec);
1063 desc->tx.type_cs_vlan_tso_len =
1064 cpu_to_le32(type_cs_vlan_tso);
1065 desc->tx.paylen = cpu_to_le32(paylen);
1066 desc->tx.mss = cpu_to_le16(mss);
1067 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1068 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1070 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1072 frag = (struct skb_frag_struct *)priv;
1073 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1076 if (unlikely(dma_mapping_error(dev, dma))) {
1077 ring->stats.sw_err_cnt++;
1081 desc_cb->length = size;
1083 if (likely(size <= HNS3_MAX_BD_SIZE)) {
1084 u16 bdtp_fe_sc_vld_ra_ri = 0;
1086 desc_cb->priv = priv;
1088 desc_cb->type = type;
1089 desc->addr = cpu_to_le64(dma);
1090 desc->tx.send_size = cpu_to_le16(size);
1091 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
1092 desc->tx.bdtp_fe_sc_vld_ra_ri =
1093 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1095 ring_ptr_move_fw(ring, next_to_use);
1099 frag_buf_num = hns3_tx_bd_count(size);
1100 sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1101 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1103 /* When frag size is bigger than hardware limit, split this frag */
1104 for (k = 0; k < frag_buf_num; k++) {
1105 u16 bdtp_fe_sc_vld_ra_ri = 0;
1107 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1108 desc_cb->priv = priv;
1109 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1110 desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1111 DESC_TYPE_SKB : DESC_TYPE_PAGE;
1113 /* now, fill the descriptor */
1114 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1115 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1116 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1117 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1118 frag_end && (k == frag_buf_num - 1) ?
1120 desc->tx.bdtp_fe_sc_vld_ra_ri =
1121 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1123 /* move ring pointer to next */
1124 ring_ptr_move_fw(ring, next_to_use);
1126 desc_cb = &ring->desc_cb[ring->next_to_use];
1127 desc = &ring->desc[ring->next_to_use];
1133 static int hns3_nic_bd_num(struct sk_buff *skb)
1135 int size = skb_headlen(skb);
1138 /* if the total len is within the max bd limit */
1139 if (likely(skb->len <= HNS3_MAX_BD_SIZE))
1140 return skb_shinfo(skb)->nr_frags + 1;
1142 bd_num = hns3_tx_bd_count(size);
1144 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1145 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1148 size = skb_frag_size(frag);
1149 frag_bd_num = hns3_tx_bd_count(size);
1151 if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
1154 bd_num += frag_bd_num;
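/* hns3_gso_hdr_len() returns the total L2 + L3 + L4 header length of a GSO
 * skb, using the inner headers for encapsulated packets.
 */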
1160 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1162 if (!skb->encapsulation)
1163 return skb_transport_offset(skb) + tcp_hdrlen(skb);
1165 return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
1168 /* HW needs every continuous group of 8 buffers to hold more data than MSS;
1169 * we simplify this by ensuring that skb_headlen plus the first continuous
1170 * 7 frags is larger than the GSO header len plus MSS, and that every other
1171 * continuous 7 frags is larger than MSS, except the last 7 frags.
1173 static bool hns3_skb_need_linearized(struct sk_buff *skb)
1175 int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
1176 unsigned int tot_len = 0;
1179 for (i = 0; i < bd_limit; i++)
1180 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1182 /* ensure that headlen + the first 7 frags is greater than MSS + header,
1183 * and that the first 7 frags alone are greater than MSS.
1185 if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
1186 hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
1189 /* ensure each remaining continuous group of 7 buffers is greater than MSS */
1190 for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
1191 tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
1192 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
1194 if (tot_len < skb_shinfo(skb)->gso_size)
1201 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1202 struct sk_buff **out_skb)
1204 struct sk_buff *skb = *out_skb;
1207 bd_num = hns3_nic_bd_num(skb);
1211 if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
1212 struct sk_buff *new_skb;
1214 if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
1217 bd_num = hns3_tx_bd_count(skb->len);
1218 if (unlikely(ring_space(ring) < bd_num))
1220 /* manually split the packet to send */
1221 new_skb = skb_copy(skb, GFP_ATOMIC);
1224 dev_kfree_skb_any(skb);
1227 u64_stats_update_begin(&ring->syncp);
1228 ring->stats.tx_copy++;
1229 u64_stats_update_end(&ring->syncp);
1233 if (unlikely(ring_space(ring) < bd_num))
1239 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1241 struct device *dev = ring_to_dev(ring);
1244 for (i = 0; i < ring->desc_num; i++) {
1245 /* check if this is where we started */
1246 if (ring->next_to_use == next_to_use_orig)
1250 ring_ptr_move_bw(ring, next_to_use);
1252 /* unmap the descriptor dma address */
1253 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1254 dma_unmap_single(dev,
1255 ring->desc_cb[ring->next_to_use].dma,
1256 ring->desc_cb[ring->next_to_use].length,
1258 else if (ring->desc_cb[ring->next_to_use].length)
1260 ring->desc_cb[ring->next_to_use].dma,
1261 ring->desc_cb[ring->next_to_use].length,
1264 ring->desc_cb[ring->next_to_use].length = 0;
1265 ring->desc_cb[ring->next_to_use].dma = 0;
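/* Transmit path: check the ring space (copying the skb into a linear buffer
 * when it would need too many BDs), fill one BD for the linear head and one
 * per fragment, account the bytes with the stack, and finally ring the TQP
 * doorbell so hardware fetches the new descriptors.
 */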
1269 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1271 struct hns3_nic_priv *priv = netdev_priv(netdev);
1272 struct hns3_nic_ring_data *ring_data =
1273 &tx_ring_data(priv, skb->queue_mapping);
1274 struct hns3_enet_ring *ring = ring_data->ring;
1275 struct netdev_queue *dev_queue;
1276 struct skb_frag_struct *frag;
1277 int next_to_use_head;
1284 /* Prefetch the data used later */
1285 prefetch(skb->data);
1287 buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
1288 if (unlikely(buf_num <= 0)) {
1289 if (buf_num == -EBUSY) {
1290 u64_stats_update_begin(&ring->syncp);
1291 ring->stats.tx_busy++;
1292 u64_stats_update_end(&ring->syncp);
1293 goto out_net_tx_busy;
1294 } else if (buf_num == -ENOMEM) {
1295 u64_stats_update_begin(&ring->syncp);
1296 ring->stats.sw_err_cnt++;
1297 u64_stats_update_end(&ring->syncp);
1300 if (net_ratelimit())
1301 netdev_err(netdev, "xmit error: %d!\n", buf_num);
1306 /* No. of segments (plus a header) */
1307 seg_num = skb_shinfo(skb)->nr_frags + 1;
1308 /* Fill the first part */
1309 size = skb_headlen(skb);
1311 next_to_use_head = ring->next_to_use;
1313 ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1318 /* Fill the fragments */
1319 for (i = 1; i < seg_num; i++) {
1320 frag = &skb_shinfo(skb)->frags[i - 1];
1321 size = skb_frag_size(frag);
1323 ret = hns3_fill_desc(ring, frag, size,
1324 seg_num - 1 == i ? 1 : 0,
1331 /* all BDs for this packet have been filled; notify the stack and HW */
1332 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1333 netdev_tx_sent_queue(dev_queue, skb->len);
1335 wmb(); /* Commit all data before submit */
1337 hnae3_queue_xmit(ring->tqp, buf_num);
1339 return NETDEV_TX_OK;
1342 hns3_clear_desc(ring, next_to_use_head);
1345 dev_kfree_skb_any(skb);
1346 return NETDEV_TX_OK;
1349 netif_stop_subqueue(netdev, ring_data->queue_index);
1350 smp_mb(); /* Commit all data before submit */
1352 return NETDEV_TX_BUSY;
1355 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1357 struct hnae3_handle *h = hns3_get_handle(netdev);
1358 struct sockaddr *mac_addr = p;
1361 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1362 return -EADDRNOTAVAIL;
1364 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1365 netdev_info(netdev, "already using mac address %pM\n",
1370 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1372 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1376 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1381 static int hns3_nic_do_ioctl(struct net_device *netdev,
1382 struct ifreq *ifr, int cmd)
1384 struct hnae3_handle *h = hns3_get_handle(netdev);
1386 if (!netif_running(netdev))
1389 if (!h->ae_algo->ops->do_ioctl)
1392 return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1395 static int hns3_nic_set_features(struct net_device *netdev,
1396 netdev_features_t features)
1398 netdev_features_t changed = netdev->features ^ features;
1399 struct hns3_nic_priv *priv = netdev_priv(netdev);
1400 struct hnae3_handle *h = priv->ae_handle;
1404 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
1405 enable = !!(features & NETIF_F_GRO_HW);
1406 ret = h->ae_algo->ops->set_gro_en(h, enable);
1411 if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1412 h->ae_algo->ops->enable_vlan_filter) {
1413 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
1414 h->ae_algo->ops->enable_vlan_filter(h, enable);
1417 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1418 h->ae_algo->ops->enable_hw_strip_rxvtag) {
1419 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1420 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
1425 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1426 enable = !!(features & NETIF_F_NTUPLE);
1427 h->ae_algo->ops->enable_fd(h, enable);
1430 netdev->features = features;
1434 static void hns3_nic_get_stats64(struct net_device *netdev,
1435 struct rtnl_link_stats64 *stats)
1437 struct hns3_nic_priv *priv = netdev_priv(netdev);
1438 int queue_num = priv->ae_handle->kinfo.num_tqps;
1439 struct hnae3_handle *handle = priv->ae_handle;
1440 struct hns3_enet_ring *ring;
1441 u64 rx_length_errors = 0;
1442 u64 rx_crc_errors = 0;
1443 u64 rx_multicast = 0;
1455 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1458 handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1460 for (idx = 0; idx < queue_num; idx++) {
1461 /* fetch the tx stats */
1462 ring = priv->ring_data[idx].ring;
1464 start = u64_stats_fetch_begin_irq(&ring->syncp);
1465 tx_bytes += ring->stats.tx_bytes;
1466 tx_pkts += ring->stats.tx_pkts;
1467 tx_drop += ring->stats.sw_err_cnt;
1468 tx_errors += ring->stats.sw_err_cnt;
1469 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1471 /* fetch the rx stats */
1472 ring = priv->ring_data[idx + queue_num].ring;
1474 start = u64_stats_fetch_begin_irq(&ring->syncp);
1475 rx_bytes += ring->stats.rx_bytes;
1476 rx_pkts += ring->stats.rx_pkts;
1477 rx_drop += ring->stats.non_vld_descs;
1478 rx_drop += ring->stats.l2_err;
1479 rx_errors += ring->stats.non_vld_descs;
1480 rx_errors += ring->stats.l2_err;
1481 rx_crc_errors += ring->stats.l2_err;
1482 rx_crc_errors += ring->stats.l3l4_csum_err;
1483 rx_multicast += ring->stats.rx_multicast;
1484 rx_length_errors += ring->stats.err_pkt_len;
1485 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1488 stats->tx_bytes = tx_bytes;
1489 stats->tx_packets = tx_pkts;
1490 stats->rx_bytes = rx_bytes;
1491 stats->rx_packets = rx_pkts;
1493 stats->rx_errors = rx_errors;
1494 stats->multicast = rx_multicast;
1495 stats->rx_length_errors = rx_length_errors;
1496 stats->rx_crc_errors = rx_crc_errors;
1497 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1499 stats->tx_errors = tx_errors;
1500 stats->rx_dropped = rx_drop;
1501 stats->tx_dropped = tx_drop;
1502 stats->collisions = netdev->stats.collisions;
1503 stats->rx_over_errors = netdev->stats.rx_over_errors;
1504 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1505 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1506 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1507 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1508 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1509 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1510 stats->tx_window_errors = netdev->stats.tx_window_errors;
1511 stats->rx_compressed = netdev->stats.rx_compressed;
1512 stats->tx_compressed = netdev->stats.tx_compressed;
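/* mqprio offload: only TC_MQPRIO_MODE_CHANNEL hardware offload (or hw == 0
 * with tc == 0 to remove the configuration) is accepted, and the request is
 * handed to the DCB ops of the hnae3 handle.
 */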
1515 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1517 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1518 struct hnae3_handle *h = hns3_get_handle(netdev);
1519 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1520 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1521 u8 tc = mqprio_qopt->qopt.num_tc;
1522 u16 mode = mqprio_qopt->mode;
1523 u8 hw = mqprio_qopt->qopt.hw;
1525 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1526 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1529 if (tc > HNAE3_MAX_TC)
1535 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1536 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1539 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1542 if (type != TC_SETUP_QDISC_MQPRIO)
1545 return hns3_setup_tc(dev, type_data);
1548 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1549 __be16 proto, u16 vid)
1551 struct hnae3_handle *h = hns3_get_handle(netdev);
1554 if (h->ae_algo->ops->set_vlan_filter)
1555 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1560 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1561 __be16 proto, u16 vid)
1563 struct hnae3_handle *h = hns3_get_handle(netdev);
1566 if (h->ae_algo->ops->set_vlan_filter)
1567 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1572 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1573 u8 qos, __be16 vlan_proto)
1575 struct hnae3_handle *h = hns3_get_handle(netdev);
1578 if (h->ae_algo->ops->set_vf_vlan_filter)
1579 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1585 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1587 struct hnae3_handle *h = hns3_get_handle(netdev);
1590 if (hns3_nic_resetting(netdev))
1593 if (!h->ae_algo->ops->set_mtu)
1596 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1598 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1601 netdev->mtu = new_mtu;
1606 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1608 struct hns3_nic_priv *priv = netdev_priv(ndev);
1609 struct hnae3_handle *h = hns3_get_handle(ndev);
1610 struct hns3_enet_ring *tx_ring = NULL;
1611 struct napi_struct *napi;
1612 int timeout_queue = 0;
1613 int hw_head, hw_tail;
1614 int fbd_num, fbd_oft;
1615 int ebd_num, ebd_oft;
1620 /* Find the stopped queue the same way the stack does */
1621 for (i = 0; i < ndev->num_tx_queues; i++) {
1622 struct netdev_queue *q;
1623 unsigned long trans_start;
1625 q = netdev_get_tx_queue(ndev, i);
1626 trans_start = q->trans_start;
1627 if (netif_xmit_stopped(q) &&
1629 (trans_start + ndev->watchdog_timeo))) {
1635 if (i == ndev->num_tx_queues) {
1637 "no netdev TX timeout queue found, timeout count: %llu\n",
1638 priv->tx_timeout_count);
1642 priv->tx_timeout_count++;
1644 tx_ring = priv->ring_data[timeout_queue].ring;
1645 napi = &tx_ring->tqp_vector->napi;
1648 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
1649 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
1650 tx_ring->next_to_clean, napi->state);
1653 "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
1654 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
1655 tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
1658 "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
1659 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
1660 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
1662 /* When the MAC receives many pause frames continuously, it is unable to
1663 * send packets, which may cause a TX timeout
1665 if (h->ae_algo->ops->update_stats &&
1666 h->ae_algo->ops->get_mac_pause_stats) {
1667 u64 tx_pause_cnt, rx_pause_cnt;
1669 h->ae_algo->ops->update_stats(h, &ndev->stats);
1670 h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
1672 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
1673 tx_pause_cnt, rx_pause_cnt);
1676 hw_head = readl_relaxed(tx_ring->tqp->io_base +
1677 HNS3_RING_TX_RING_HEAD_REG);
1678 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1679 HNS3_RING_TX_RING_TAIL_REG);
1680 fbd_num = readl_relaxed(tx_ring->tqp->io_base +
1681 HNS3_RING_TX_RING_FBDNUM_REG);
1682 fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
1683 HNS3_RING_TX_RING_OFFSET_REG);
1684 ebd_num = readl_relaxed(tx_ring->tqp->io_base +
1685 HNS3_RING_TX_RING_EBDNUM_REG);
1686 ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
1687 HNS3_RING_TX_RING_EBD_OFFSET_REG);
1688 bd_num = readl_relaxed(tx_ring->tqp->io_base +
1689 HNS3_RING_TX_RING_BD_NUM_REG);
1690 bd_err = readl_relaxed(tx_ring->tqp->io_base +
1691 HNS3_RING_TX_RING_BD_ERR_REG);
1692 ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
1693 tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
1696 "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
1697 bd_num, hw_head, hw_tail, bd_err,
1698 readl(tx_ring->tqp_vector->mask_addr));
1700 "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
1701 ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
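/* On a TX timeout, the handler below dumps the stalled queue's software and
 * hardware state and then asks the hardware layer, via reset_event, to decide
 * which reset level to perform.
 */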
1706 static void hns3_nic_net_timeout(struct net_device *ndev)
1708 struct hns3_nic_priv *priv = netdev_priv(ndev);
1709 struct hnae3_handle *h = priv->ae_handle;
1711 if (!hns3_get_tx_timeo_queue_info(ndev))
1714 /* request the reset, and let hclge determine
1715 * which reset level should be done
1717 if (h->ae_algo->ops->reset_event)
1718 h->ae_algo->ops->reset_event(h->pdev, h);
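/* aRFS flow steering is only attempted for non-encapsulated IPv4/IPv6 TCP or
 * UDP flows; anything else is rejected with -EPROTONOSUPPORT.
 */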
1721 #ifdef CONFIG_RFS_ACCEL
1722 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1723 u16 rxq_index, u32 flow_id)
1725 struct hnae3_handle *h = hns3_get_handle(dev);
1726 struct flow_keys fkeys;
1728 if (!h->ae_algo->ops->add_arfs_entry)
1731 if (skb->encapsulation)
1732 return -EPROTONOSUPPORT;
1734 if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
1735 return -EPROTONOSUPPORT;
1737 if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
1738 fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
1739 (fkeys.basic.ip_proto != IPPROTO_TCP &&
1740 fkeys.basic.ip_proto != IPPROTO_UDP))
1741 return -EPROTONOSUPPORT;
1743 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
1747 static const struct net_device_ops hns3_nic_netdev_ops = {
1748 .ndo_open = hns3_nic_net_open,
1749 .ndo_stop = hns3_nic_net_stop,
1750 .ndo_start_xmit = hns3_nic_net_xmit,
1751 .ndo_tx_timeout = hns3_nic_net_timeout,
1752 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1753 .ndo_do_ioctl = hns3_nic_do_ioctl,
1754 .ndo_change_mtu = hns3_nic_change_mtu,
1755 .ndo_set_features = hns3_nic_set_features,
1756 .ndo_get_stats64 = hns3_nic_get_stats64,
1757 .ndo_setup_tc = hns3_nic_setup_tc,
1758 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1759 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1760 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1761 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1762 #ifdef CONFIG_RFS_ACCEL
1763 .ndo_rx_flow_steer = hns3_rx_flow_steer,
1768 bool hns3_is_phys_func(struct pci_dev *pdev)
1770 u32 dev_id = pdev->device;
1773 case HNAE3_DEV_ID_GE:
1774 case HNAE3_DEV_ID_25GE:
1775 case HNAE3_DEV_ID_25GE_RDMA:
1776 case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1777 case HNAE3_DEV_ID_50GE_RDMA:
1778 case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1779 case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1781 case HNAE3_DEV_ID_100G_VF:
1782 case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1785 dev_warn(&pdev->dev, "un-recognized pci device-id %d",
1792 static void hns3_disable_sriov(struct pci_dev *pdev)
1794 /* If our VFs are assigned we cannot shut down SR-IOV
1795 * without causing issues, so just leave the hardware
1796 * available but disabled
1798 if (pci_vfs_assigned(pdev)) {
1799 dev_warn(&pdev->dev,
1800 "disabling driver while VFs are assigned\n");
1804 pci_disable_sriov(pdev);
1807 static void hns3_get_dev_capability(struct pci_dev *pdev,
1808 struct hnae3_ae_dev *ae_dev)
1810 if (pdev->revision >= 0x21) {
1811 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1812 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
1816 /* hns3_probe - Device initialization routine
1817 * @pdev: PCI device information struct
1818 * @ent: entry in hns3_pci_tbl
1820 * hns3_probe initializes a PF identified by a pci_dev structure.
1821 * The OS initialization, configuring of the PF private structure,
1822 * and a hardware reset occur.
1824 * Returns 0 on success, negative on failure
1826 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1828 struct hnae3_ae_dev *ae_dev;
1831 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1838 ae_dev->pdev = pdev;
1839 ae_dev->flag = ent->driver_data;
1840 ae_dev->dev_type = HNAE3_DEV_KNIC;
1841 ae_dev->reset_type = HNAE3_NONE_RESET;
1842 hns3_get_dev_capability(pdev, ae_dev);
1843 pci_set_drvdata(pdev, ae_dev);
1845 ret = hnae3_register_ae_dev(ae_dev);
1847 devm_kfree(&pdev->dev, ae_dev);
1848 pci_set_drvdata(pdev, NULL);
1854 /* hns3_remove - Device removal routine
1855 * @pdev: PCI device information struct
1857 static void hns3_remove(struct pci_dev *pdev)
1859 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1861 if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1862 hns3_disable_sriov(pdev);
1864 hnae3_unregister_ae_dev(ae_dev);
1865 pci_set_drvdata(pdev, NULL);
1869 * hns3_pci_sriov_configure
1870 * @pdev: pointer to a pci_dev structure
1871 * @num_vfs: number of VFs to allocate
1873 * Enable or change the number of VFs. Called when the user updates the number of VFs.
1876 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1880 if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1881 dev_warn(&pdev->dev, "Can not config SRIOV\n");
1886 ret = pci_enable_sriov(pdev, num_vfs);
1888 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1891 } else if (!pci_vfs_assigned(pdev)) {
1892 pci_disable_sriov(pdev);
1894 dev_warn(&pdev->dev,
1895 "Unable to free VFs because some are assigned to VMs.\n");
1901 static void hns3_shutdown(struct pci_dev *pdev)
1903 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1905 hnae3_unregister_ae_dev(ae_dev);
1906 devm_kfree(&pdev->dev, ae_dev);
1907 pci_set_drvdata(pdev, NULL);
1909 if (system_state == SYSTEM_POWER_OFF)
1910 pci_set_power_state(pdev, PCI_D3hot);
1913 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1914 pci_channel_state_t state)
1916 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1917 pci_ers_result_t ret;
1919 dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
1921 if (state == pci_channel_io_perm_failure)
1922 return PCI_ERS_RESULT_DISCONNECT;
1924 if (!ae_dev || !ae_dev->ops) {
1926 "Can't recover - error happened before device initialized\n");
1927 return PCI_ERS_RESULT_NONE;
1930 if (ae_dev->ops->handle_hw_ras_error)
1931 ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
1933 return PCI_ERS_RESULT_NONE;
1938 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1940 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1941 struct device *dev = &pdev->dev;
1943 dev_info(dev, "requesting reset due to PCI error\n");
1945 if (!ae_dev || !ae_dev->ops)
1946 return PCI_ERS_RESULT_NONE;
1948 /* request the reset */
1949 if (ae_dev->ops->reset_event) {
1950 if (!ae_dev->override_pci_need_reset)
1951 ae_dev->ops->reset_event(pdev, NULL);
1953 return PCI_ERS_RESULT_RECOVERED;
1956 return PCI_ERS_RESULT_DISCONNECT;
1959 static void hns3_reset_prepare(struct pci_dev *pdev)
1961 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1963 dev_info(&pdev->dev, "hns3 flr prepare\n");
1964 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
1965 ae_dev->ops->flr_prepare(ae_dev);
1968 static void hns3_reset_done(struct pci_dev *pdev)
1970 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1972 dev_info(&pdev->dev, "hns3 flr done\n");
1973 if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
1974 ae_dev->ops->flr_done(ae_dev);
1977 static const struct pci_error_handlers hns3_err_handler = {
1978 .error_detected = hns3_error_detected,
1979 .slot_reset = hns3_slot_reset,
1980 .reset_prepare = hns3_reset_prepare,
1981 .reset_done = hns3_reset_done,
1984 static struct pci_driver hns3_driver = {
1985 .name = hns3_driver_name,
1986 .id_table = hns3_pci_tbl,
1987 .probe = hns3_probe,
1988 .remove = hns3_remove,
1989 .shutdown = hns3_shutdown,
1990 .sriov_configure = hns3_pci_sriov_configure,
1991 .err_handler = &hns3_err_handler,
1994 /* set the default features for hns3 */
1995 static void hns3_set_default_feature(struct net_device *netdev)
1997 struct hnae3_handle *h = hns3_get_handle(netdev);
1998 struct pci_dev *pdev = h->pdev;
2000 netdev->priv_flags |= IFF_UNICAST_FLT;
2002 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2003 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2004 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2005 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2006 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2008 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2010 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2012 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2013 NETIF_F_HW_VLAN_CTAG_FILTER |
2014 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2015 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2016 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2017 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2018 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2020 netdev->vlan_features |=
2021 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2022 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
2023 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2024 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2025 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2027 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2028 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2029 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2030 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2031 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2032 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2034 if (pdev->revision >= 0x21) {
2035 netdev->hw_features |= NETIF_F_GRO_HW;
2036 netdev->features |= NETIF_F_GRO_HW;
2038 if (!(h->flags & HNAE3_SUPPORT_VF)) {
2039 netdev->hw_features |= NETIF_F_NTUPLE;
2040 netdev->features |= NETIF_F_NTUPLE;
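/* RX buffer management: each RX descriptor owns a page allocation (order from
 * hnae3_page_order()) that is DMA-mapped as a whole; the helpers below
 * allocate, map, unmap and free those buffers.
 */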
2045 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
2046 struct hns3_desc_cb *cb)
2048 unsigned int order = hnae3_page_order(ring);
2051 p = dev_alloc_pages(order);
2056 cb->page_offset = 0;
2058 cb->buf = page_address(p);
2059 cb->length = hnae3_page_size(ring);
2060 cb->type = DESC_TYPE_PAGE;
2065 static void hns3_free_buffer(struct hns3_enet_ring *ring,
2066 struct hns3_desc_cb *cb)
2068 if (cb->type == DESC_TYPE_SKB)
2069 dev_kfree_skb_any((struct sk_buff *)cb->priv);
2070 else if (!HNAE3_IS_TX_RING(ring))
2071 put_page((struct page *)cb->priv);
2072 memset(cb, 0, sizeof(*cb));
2075 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
2077 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
2078 cb->length, ring_to_dma_dir(ring));
2080 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
2086 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
2087 struct hns3_desc_cb *cb)
2089 if (cb->type == DESC_TYPE_SKB)
2090 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
2091 ring_to_dma_dir(ring));
2092 else if (cb->length)
2093 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
2094 ring_to_dma_dir(ring));
2097 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2099 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2100 ring->desc[i].addr = 0;
2103 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
2105 struct hns3_desc_cb *cb = &ring->desc_cb[i];
2107 if (!ring->desc_cb[i].dma)
2110 hns3_buffer_detach(ring, i);
2111 hns3_free_buffer(ring, cb);
2114 static void hns3_free_buffers(struct hns3_enet_ring *ring)
2118 for (i = 0; i < ring->desc_num; i++)
2119 hns3_free_buffer_detach(ring, i);
2122 /* free desc along with its attached buffer */
2123 static void hns3_free_desc(struct hns3_enet_ring *ring)
2125 int size = ring->desc_num * sizeof(ring->desc[0]);
2127 hns3_free_buffers(ring);
2130 dma_free_coherent(ring_to_dev(ring), size,
2131 ring->desc, ring->desc_dma_addr);
2136 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2138 int size = ring->desc_num * sizeof(ring->desc[0]);
2140 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2141 &ring->desc_dma_addr, GFP_KERNEL);
2148 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
2149 struct hns3_desc_cb *cb)
2153 ret = hns3_alloc_buffer(ring, cb);
2157 ret = hns3_map_buffer(ring, cb);
2164 hns3_free_buffer(ring, cb);
2169 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2171 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2176 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2181 /* Allocate memory for a raw packet and map it for DMA */
2182 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2186 for (i = 0; i < ring->desc_num; i++) {
2187 ret = hns3_alloc_buffer_attach(ring, i);
2189 goto out_buffer_fail;
2195 for (j = i - 1; j >= 0; j--)
2196 hns3_free_buffer_detach(ring, j);
2200 /* detach an in-use buffer and replace it with a reserved one */
2201 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2202 struct hns3_desc_cb *res_cb)
2204 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2205 ring->desc_cb[i] = *res_cb;
2206 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2207 ring->desc[i].rx.bd_base_info = 0;
2210 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2212 ring->desc_cb[i].reuse_flag = 0;
2213 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2214 + ring->desc_cb[i].page_offset);
2215 ring->desc[i].rx.bd_base_info = 0;
2218 static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
2219 int *bytes, int *pkts)
2221 int ntc = ring->next_to_clean;
2222 struct hns3_desc_cb *desc_cb;
2224 while (head != ntc) {
2225 desc_cb = &ring->desc_cb[ntc];
2226 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2227 (*bytes) += desc_cb->length;
2228 /* desc_cb will be cleaned after hns3_free_buffer_detach */
2229 hns3_free_buffer_detach(ring, ntc);
2231 if (++ntc == ring->desc_num)
2234 /* Issue prefetch for next Tx descriptor */
2235 prefetch(&ring->desc_cb[ntc]);
2238 /* This smp_store_release() pairs with smp_load_acquire() in
2239 * ring_space called by hns3_nic_net_xmit.
2241 smp_store_release(&ring->next_to_clean, ntc);
2244 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2246 int u = ring->next_to_use;
2247 int c = ring->next_to_clean;
2249 if (unlikely(h > ring->desc_num))
2252 return u > c ? (h > c && h <= u) : (h > c || h <= u);
2255 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2257 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2258 struct hns3_nic_priv *priv = netdev_priv(netdev);
2259 struct netdev_queue *dev_queue;
2263 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2264 rmb(); /* Make sure head is ready before touching any data */
2266 if (is_ring_empty(ring) || head == ring->next_to_clean)
2267 return; /* no data to poll */
2269 if (unlikely(!is_valid_clean_head(ring, head))) {
2270 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2271 ring->next_to_use, ring->next_to_clean);
2273 u64_stats_update_begin(&ring->syncp);
2274 ring->stats.io_err_cnt++;
2275 u64_stats_update_end(&ring->syncp);
2281 hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
2283 ring->tqp_vector->tx_group.total_bytes += bytes;
2284 ring->tqp_vector->tx_group.total_packets += pkts;
2286 u64_stats_update_begin(&ring->syncp);
2287 ring->stats.tx_bytes += bytes;
2288 ring->stats.tx_pkts += pkts;
2289 u64_stats_update_end(&ring->syncp);
2291 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2292 netdev_tx_completed_queue(dev_queue, pkts, bytes);
2294 if (unlikely(pkts && netif_carrier_ok(netdev) &&
2295 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2296 /* Make sure that anybody stopping the queue after this
2297 * sees the new next_to_clean.
2300 if (netif_tx_queue_stopped(dev_queue) &&
2301 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2302 netif_tx_wake_queue(dev_queue);
2303 ring->stats.restart_queue++;
2308 static int hns3_desc_unused(struct hns3_enet_ring *ring)
2310 int ntc = ring->next_to_clean;
2311 int ntu = ring->next_to_use;
2313 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
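/* Worked example (illustrative, not part of the original source): with
 * desc_num = 512, next_to_clean = 10 and next_to_use = 500 the result is
 * 512 + 10 - 500 = 22 unused descriptors; with next_to_clean = 300 and
 * next_to_use = 100 it is 0 + 300 - 100 = 200.
 */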
2317 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2319 struct hns3_desc_cb *desc_cb;
2320 struct hns3_desc_cb res_cbs;
2323 for (i = 0; i < cleaned_count; i++) {
2324 desc_cb = &ring->desc_cb[ring->next_to_use];
2325 if (desc_cb->reuse_flag) {
2326 u64_stats_update_begin(&ring->syncp);
2327 ring->stats.reuse_pg_cnt++;
2328 u64_stats_update_end(&ring->syncp);
2330 hns3_reuse_buffer(ring, ring->next_to_use);
2332 ret = hns3_reserve_buffer_map(ring, &res_cbs);
2334 u64_stats_update_begin(&ring->syncp);
2335 ring->stats.sw_err_cnt++;
2336 u64_stats_update_end(&ring->syncp);
2338 netdev_err(ring->tqp->handle->kinfo.netdev,
2339 "hnae reserve buffer map failed.\n");
2342 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2344 u64_stats_update_begin(&ring->syncp);
2345 ring->stats.non_reuse_pg++;
2346 u64_stats_update_end(&ring->syncp);
2349 ring_ptr_move_fw(ring, next_to_use);
2352 wmb(); /* Make sure all data has been written before submitting */
2353 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2356 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2357 struct hns3_enet_ring *ring, int pull_len,
2358 struct hns3_desc_cb *desc_cb)
2360 struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2361 int size = le16_to_cpu(desc->rx.size);
2362 u32 truesize = hnae3_buf_size(ring);
2364 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2365 size - pull_len, truesize);
2367 /* Avoid reusing remote pages, or pages the stack is still using when
2368 * page_offset rolls back to zero; flag as not reusable by default
2370 if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
2371 (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
2374 /* Move offset up to the next cache line */
2375 desc_cb->page_offset += truesize;
2377 if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
2378 desc_cb->reuse_flag = 1;
2379 /* Bump the page ref count before it is handed to the stack */
2380 get_page(desc_cb->priv);
2381 } else if (page_count(desc_cb->priv) == 1) {
2382 desc_cb->reuse_flag = 1;
2383 desc_cb->page_offset = 0;
2384 get_page(desc_cb->priv);
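/* Illustrative example (not part of the original source), assuming a 4K
 * page and a 2K buffer size: the first buffer uses page_offset 0, the code
 * above then bumps page_offset to 2048 and takes a page reference, so the
 * second half of the page can be handed out next time.  Once page_offset
 * would run past the end of the page, it is only rewound to 0 when the
 * driver holds the last reference (page_count() == 1), i.e. the stack has
 * finished with the earlier half.
 */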
2388 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
2390 __be16 type = skb->protocol;
2394 while (eth_type_vlan(type)) {
2395 struct vlan_hdr *vh;
2397 if ((depth + VLAN_HLEN) > skb_headlen(skb))
2400 vh = (struct vlan_hdr *)(skb->data + depth);
2401 type = vh->h_vlan_encapsulated_proto;
2405 skb_set_network_header(skb, depth);
2407 if (type == htons(ETH_P_IP)) {
2408 const struct iphdr *iph = ip_hdr(skb);
2410 depth += sizeof(struct iphdr);
2411 skb_set_transport_header(skb, depth);
2413 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
2415 } else if (type == htons(ETH_P_IPV6)) {
2416 const struct ipv6hdr *iph = ipv6_hdr(skb);
2418 depth += sizeof(struct ipv6hdr);
2419 skb_set_transport_header(skb, depth);
2421 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
2424 netdev_err(skb->dev,
2425 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
2426 be16_to_cpu(type), depth);
2430 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2432 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2434 if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
2435 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
2437 skb->csum_start = (unsigned char *)th - skb->head;
2438 skb->csum_offset = offsetof(struct tcphdr, check);
2439 skb->ip_summed = CHECKSUM_PARTIAL;
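/* Note (added, not part of the original source): for HW GRO aggregated
 * frames the checksum is reported as CHECKSUM_PARTIAL with
 * csum_start/csum_offset pointing at the TCP checksum field, so that if
 * the merged skb is later resegmented (e.g. when forwarded with GSO) the
 * checksum can be recomputed per segment.
 */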
2443 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2444 u32 l234info, u32 bd_base_info, u32 ol_info)
2446 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2447 int l3_type, l4_type;
2450 skb->ip_summed = CHECKSUM_NONE;
2452 skb_checksum_none_assert(skb);
2454 if (!(netdev->features & NETIF_F_RXCSUM))
2457 /* check if hardware has done checksum */
2458 if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2461 if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2462 BIT(HNS3_RXD_OL3E_B) |
2463 BIT(HNS3_RXD_OL4E_B)))) {
2464 u64_stats_update_begin(&ring->syncp);
2465 ring->stats.l3l4_csum_err++;
2466 u64_stats_update_end(&ring->syncp);
2471 ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
2474 case HNS3_OL4_TYPE_MAC_IN_UDP:
2475 case HNS3_OL4_TYPE_NVGRE:
2476 skb->csum_level = 1;
2478 case HNS3_OL4_TYPE_NO_TUN:
2479 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2481 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2484 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2485 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2486 l3_type == HNS3_L3_TYPE_IPV6) &&
2487 (l4_type == HNS3_L4_TYPE_UDP ||
2488 l4_type == HNS3_L4_TYPE_TCP ||
2489 l4_type == HNS3_L4_TYPE_SCTP))
2490 skb->ip_summed = CHECKSUM_UNNECESSARY;
2497 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2499 if (skb_has_frag_list(skb))
2500 napi_gro_flush(&ring->tqp_vector->napi, false);
2502 napi_gro_receive(&ring->tqp_vector->napi, skb);
2505 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2506 struct hns3_desc *desc, u32 l234info,
2509 struct hnae3_handle *handle = ring->tqp->handle;
2510 struct pci_dev *pdev = ring->tqp->handle->pdev;
2512 if (pdev->revision == 0x20) {
2513 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2514 if (!(*vlan_tag & VLAN_VID_MASK))
2515 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2517 return (*vlan_tag != 0);
2520 #define HNS3_STRP_OUTER_VLAN 0x1
2521 #define HNS3_STRP_INNER_VLAN 0x2
2522 #define HNS3_STRP_BOTH 0x3
2524 /* The hardware always inserts the VLAN tag into the RX descriptor when
2525 * it strips the tag from the packet, so the driver needs to determine
2526 * which tag to report to the stack.
2528 switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2529 HNS3_RXD_STRP_TAGP_S)) {
2530 case HNS3_STRP_OUTER_VLAN:
2531 if (handle->port_base_vlan_state !=
2532 HNAE3_PORT_BASE_VLAN_DISABLE)
2535 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2537 case HNS3_STRP_INNER_VLAN:
2538 if (handle->port_base_vlan_state !=
2539 HNAE3_PORT_BASE_VLAN_DISABLE)
2542 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2544 case HNS3_STRP_BOTH:
2545 if (handle->port_base_vlan_state ==
2546 HNAE3_PORT_BASE_VLAN_DISABLE)
2547 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2549 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2557 static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
2560 #define HNS3_NEED_ADD_FRAG 1
2561 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2562 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2563 struct sk_buff *skb;
2565 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
2567 if (unlikely(!skb)) {
2568 netdev_err(netdev, "alloc rx skb fail\n");
2570 u64_stats_update_begin(&ring->syncp);
2571 ring->stats.sw_err_cnt++;
2572 u64_stats_update_end(&ring->syncp);
2577 prefetchw(skb->data);
2579 ring->pending_buf = 1;
2581 ring->tail_skb = NULL;
2582 if (length <= HNS3_RX_HEAD_SIZE) {
2583 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2585 /* We can reuse buffer as-is, just make sure it is local */
2586 if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
2587 desc_cb->reuse_flag = 1;
2588 else /* This page cannot be reused so discard it */
2589 put_page(desc_cb->priv);
2591 ring_ptr_move_fw(ring, next_to_clean);
2594 u64_stats_update_begin(&ring->syncp);
2595 ring->stats.seg_pkt_cnt++;
2596 u64_stats_update_end(&ring->syncp);
2598 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
2599 __skb_put(skb, ring->pull_len);
2600 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
2602 ring_ptr_move_fw(ring, next_to_clean);
2604 return HNS3_NEED_ADD_FRAG;
2607 static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
2608 struct sk_buff **out_skb, bool pending)
2610 struct sk_buff *skb = *out_skb;
2611 struct sk_buff *head_skb = *out_skb;
2612 struct sk_buff *new_skb;
2613 struct hns3_desc_cb *desc_cb;
2614 struct hns3_desc *pre_desc;
2618 /* if there is a pending BD, the SW param next_to_clean has already
2619 * moved past it, so get the base info from the previous BD instead
2622 pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
2624 pre_desc = &ring->desc[pre_bd];
2625 bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
2627 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2630 while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2631 desc = &ring->desc[ring->next_to_clean];
2632 desc_cb = &ring->desc_cb[ring->next_to_clean];
2633 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2634 /* make sure the HW has finished writing the descriptor */
2636 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
2639 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
2640 new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2642 if (unlikely(!new_skb)) {
2643 netdev_err(ring->tqp->handle->kinfo.netdev,
2644 "alloc rx skb frag fail\n");
2649 if (ring->tail_skb) {
2650 ring->tail_skb->next = new_skb;
2651 ring->tail_skb = new_skb;
2653 skb_shinfo(skb)->frag_list = new_skb;
2654 ring->tail_skb = new_skb;
2658 if (ring->tail_skb) {
2659 head_skb->truesize += hnae3_buf_size(ring);
2660 head_skb->data_len += le16_to_cpu(desc->rx.size);
2661 head_skb->len += le16_to_cpu(desc->rx.size);
2662 skb = ring->tail_skb;
2665 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
2666 ring_ptr_move_fw(ring, next_to_clean);
2667 ring->pending_buf++;
2673 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
2674 struct sk_buff *skb, u32 l234info,
2675 u32 bd_base_info, u32 ol_info)
2679 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
2680 HNS3_RXD_GRO_SIZE_M,
2681 HNS3_RXD_GRO_SIZE_S);
2682 /* if there is no HW GRO, do not set gro params */
2683 if (!skb_shinfo(skb)->gso_size) {
2684 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
2688 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
2689 HNS3_RXD_GRO_COUNT_M,
2690 HNS3_RXD_GRO_COUNT_S);
2692 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2694 if (l3_type == HNS3_L3_TYPE_IPV4)
2695 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2696 else if (l3_type == HNS3_L3_TYPE_IPV6)
2697 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2701 return hns3_gro_complete(skb, l234info);
2704 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2705 struct sk_buff *skb, u32 rss_hash)
2707 struct hnae3_handle *handle = ring->tqp->handle;
2708 enum pkt_hash_types rss_type;
2711 rss_type = handle->kinfo.rss_type;
2713 rss_type = PKT_HASH_TYPE_NONE;
2715 skb_set_hash(skb, rss_hash, rss_type);
2718 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
2720 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2721 enum hns3_pkt_l2t_type l2_frame_type;
2722 u32 bd_base_info, l234info, ol_info;
2723 struct hns3_desc *desc;
2727 /* The bdinfo handled below is only valid on the last BD of the
2728 * current packet; ring->next_to_clean already indicates the first
2729 * descriptor of the next packet, so subtract 1 below.
2731 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
2732 (ring->desc_num - 1);
2733 desc = &ring->desc[pre_ntc];
2734 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2735 l234info = le32_to_cpu(desc->rx.l234_info);
2736 ol_info = le32_to_cpu(desc->rx.ol_info);
2738 /* Based on the hardware strategy, the offloaded tag is stored in
2739 * ot_vlan_tag in the two-layer tag case, and in vlan_tag in the
2740 * single-layer tag case.
2742 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2745 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2746 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2750 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
2751 u64_stats_update_begin(&ring->syncp);
2752 ring->stats.non_vld_descs++;
2753 u64_stats_update_end(&ring->syncp);
2758 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
2759 BIT(HNS3_RXD_L2E_B))))) {
2760 u64_stats_update_begin(&ring->syncp);
2761 if (l234info & BIT(HNS3_RXD_L2E_B))
2762 ring->stats.l2_err++;
2764 ring->stats.err_pkt_len++;
2765 u64_stats_update_end(&ring->syncp);
2772 /* Set the protocol before handing the skb up to the IP stack */
2773 skb->protocol = eth_type_trans(skb, netdev);
2775 /* This is needed in order to enable forwarding support */
2776 ret = hns3_set_gro_and_checksum(ring, skb, l234info,
2777 bd_base_info, ol_info);
2778 if (unlikely(ret)) {
2779 u64_stats_update_begin(&ring->syncp);
2780 ring->stats.rx_err_cnt++;
2781 u64_stats_update_end(&ring->syncp);
2785 l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
2788 u64_stats_update_begin(&ring->syncp);
2789 ring->stats.rx_pkts++;
2790 ring->stats.rx_bytes += len;
2792 if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
2793 ring->stats.rx_multicast++;
2795 u64_stats_update_end(&ring->syncp);
2797 ring->tqp_vector->rx_group.total_bytes += len;
2799 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
2803 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2804 struct sk_buff **out_skb)
2806 struct sk_buff *skb = ring->skb;
2807 struct hns3_desc_cb *desc_cb;
2808 struct hns3_desc *desc;
2813 desc = &ring->desc[ring->next_to_clean];
2814 desc_cb = &ring->desc_cb[ring->next_to_clean];
2818 length = le16_to_cpu(desc->rx.size);
2819 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2821 /* Check valid BD */
2822 if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2826 ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2828 /* Prefetch the first cache line of the first page.
2829 * The idea is to cache a few bytes of the packet header. Our L1 cache
2830 * line size is 64B, so we need to prefetch twice to cover 128B. But in
2831 * practice we may be running on a CPU with 128B L1 cache lines, in
2832 * which case a single prefetch suffices to cache the relevant part of
2833 * the header.
2836 #if L1_CACHE_BYTES < 128
2837 prefetch(ring->va + L1_CACHE_BYTES);
2841 ret = hns3_alloc_skb(ring, length, ring->va);
2842 *out_skb = skb = ring->skb;
2844 if (ret < 0) /* alloc buffer fail */
2846 if (ret > 0) { /* need add frag */
2847 ret = hns3_add_frag(ring, desc, &skb, false);
2851 /* As the head data may be changed when GRO is enabled, copy
2852 * the head data in after the rest of the data has been received
2854 memcpy(skb->data, ring->va,
2855 ALIGN(ring->pull_len, sizeof(long)));
2858 ret = hns3_add_frag(ring, desc, &skb, true);
2862 /* As the head data may be changed when GRO is enabled, copy
2863 * the head data in after the rest of the data has been received
2865 memcpy(skb->data, ring->va,
2866 ALIGN(ring->pull_len, sizeof(long)));
2869 ret = hns3_handle_bdinfo(ring, skb);
2870 if (unlikely(ret)) {
2871 dev_kfree_skb_any(skb);
2875 skb_record_rx_queue(skb, ring->tqp->tqp_index);
2881 int hns3_clean_rx_ring(
2882 struct hns3_enet_ring *ring, int budget,
2883 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2885 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2886 int recv_pkts, recv_bds, clean_count, err;
2887 int unused_count = hns3_desc_unused(ring);
2888 struct sk_buff *skb = ring->skb;
2891 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2892 rmb(); /* Make sure num is read before the other data is touched */
2894 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2895 num -= unused_count;
2896 unused_count -= ring->pending_buf;
2898 while (recv_pkts < budget && recv_bds < num) {
2899 /* Reuse or realloc buffers */
2900 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2901 hns3_nic_alloc_rx_buffers(ring,
2902 clean_count + unused_count);
2904 unused_count = hns3_desc_unused(ring) -
2909 err = hns3_handle_rx_bd(ring, &skb);
2910 if (unlikely(!skb)) /* This fault cannot be repaired */
2913 if (err == -ENXIO) { /* Do not get FE for the packet */
2915 } else if (unlikely(err)) { /* skip over the erroneous packet */
2916 recv_bds += ring->pending_buf;
2917 clean_count += ring->pending_buf;
2919 ring->pending_buf = 0;
2924 recv_bds += ring->pending_buf;
2925 clean_count += ring->pending_buf;
2927 ring->pending_buf = 0;
2933 /* Make sure all data has been written before submitting */
2934 if (clean_count + unused_count > 0)
2935 hns3_nic_alloc_rx_buffers(ring,
2936 clean_count + unused_count);
2941 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
2943 #define HNS3_RX_LOW_BYTE_RATE 10000
2944 #define HNS3_RX_MID_BYTE_RATE 20000
2945 #define HNS3_RX_ULTRA_PACKET_RATE 40
2947 enum hns3_flow_level_range new_flow_level;
2948 struct hns3_enet_tqp_vector *tqp_vector;
2949 int packets_per_msecs, bytes_per_msecs;
2952 tqp_vector = ring_group->ring->tqp_vector;
2954 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2955 if (!time_passed_ms)
2958 do_div(ring_group->total_packets, time_passed_ms);
2959 packets_per_msecs = ring_group->total_packets;
2961 do_div(ring_group->total_bytes, time_passed_ms);
2962 bytes_per_msecs = ring_group->total_bytes;
2964 new_flow_level = ring_group->coal.flow_level;
2966 /* Simple throttle rate management
2967 * 0-10MB/s lower (50000 ints/s)
2968 * 10-20MB/s middle (20000 ints/s)
2969 * 20-1249MB/s high (18000 ints/s)
2970 * > 40000pps ultra (8000 ints/s)
2972 switch (new_flow_level) {
2974 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2975 new_flow_level = HNS3_FLOW_MID;
2978 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2979 new_flow_level = HNS3_FLOW_HIGH;
2980 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2981 new_flow_level = HNS3_FLOW_LOW;
2983 case HNS3_FLOW_HIGH:
2984 case HNS3_FLOW_ULTRA:
2986 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2987 new_flow_level = HNS3_FLOW_MID;
2991 if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2992 &tqp_vector->rx_group == ring_group)
2993 new_flow_level = HNS3_FLOW_ULTRA;
2995 ring_group->total_bytes = 0;
2996 ring_group->total_packets = 0;
2997 ring_group->coal.flow_level = new_flow_level;
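/* Worked example (illustrative, not part of the original source): with
 * time_passed_ms = 4 and total_bytes = 100000, bytes_per_msecs is 25000,
 * which is above HNS3_RX_MID_BYTE_RATE, so an HNS3_FLOW_MID group is
 * promoted to HNS3_FLOW_HIGH; if the rx group also sees packets_per_msecs
 * above HNS3_RX_ULTRA_PACKET_RATE, it is bumped to HNS3_FLOW_ULTRA
 * instead.
 */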
3002 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
3004 struct hns3_enet_tqp_vector *tqp_vector;
3007 if (!ring_group->ring)
3010 tqp_vector = ring_group->ring->tqp_vector;
3011 if (!tqp_vector->last_jiffies)
3014 if (ring_group->total_packets == 0) {
3015 ring_group->coal.int_gl = HNS3_INT_GL_50K;
3016 ring_group->coal.flow_level = HNS3_FLOW_LOW;
3020 if (!hns3_get_new_flow_lvl(ring_group))
3023 new_int_gl = ring_group->coal.int_gl;
3024 switch (ring_group->coal.flow_level) {
3026 new_int_gl = HNS3_INT_GL_50K;
3029 new_int_gl = HNS3_INT_GL_20K;
3031 case HNS3_FLOW_HIGH:
3032 new_int_gl = HNS3_INT_GL_18K;
3034 case HNS3_FLOW_ULTRA:
3035 new_int_gl = HNS3_INT_GL_8K;
3041 if (new_int_gl != ring_group->coal.int_gl) {
3042 ring_group->coal.int_gl = new_int_gl;
3048 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
3050 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
3051 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
3052 bool rx_update, tx_update;
3054 /* update param every 1000ms */
3055 if (time_before(jiffies,
3056 tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
3059 if (rx_group->coal.gl_adapt_enable) {
3060 rx_update = hns3_get_new_int_gl(rx_group);
3062 hns3_set_vector_coalesce_rx_gl(tqp_vector,
3063 rx_group->coal.int_gl);
3066 if (tx_group->coal.gl_adapt_enable) {
3067 tx_update = hns3_get_new_int_gl(tx_group);
3069 hns3_set_vector_coalesce_tx_gl(tqp_vector,
3070 tx_group->coal.int_gl);
3073 tqp_vector->last_jiffies = jiffies;
3076 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
3078 struct hns3_nic_priv *priv = netdev_priv(napi->dev);
3079 struct hns3_enet_ring *ring;
3080 int rx_pkt_total = 0;
3082 struct hns3_enet_tqp_vector *tqp_vector =
3083 container_of(napi, struct hns3_enet_tqp_vector, napi);
3084 bool clean_complete = true;
3085 int rx_budget = budget;
3087 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3088 napi_complete(napi);
3092 /* Since the actual Tx work is minimal, we can give the Tx a larger
3093 * budget and be more aggressive about cleaning up the Tx descriptors.
3095 hns3_for_each_ring(ring, tqp_vector->tx_group)
3096 hns3_clean_tx_ring(ring);
3098 /* make sure the rx ring budget is not smaller than 1 */
3099 if (tqp_vector->num_tqps > 1)
3100 rx_budget = max(budget / tqp_vector->num_tqps, 1);
3102 hns3_for_each_ring(ring, tqp_vector->rx_group) {
3103 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
3106 if (rx_cleaned >= rx_budget)
3107 clean_complete = false;
3109 rx_pkt_total += rx_cleaned;
3112 tqp_vector->rx_group.total_packets += rx_pkt_total;
3114 if (!clean_complete)
3117 if (napi_complete(napi) &&
3118 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3119 hns3_update_new_int_gl(tqp_vector);
3120 hns3_mask_vector_irq(tqp_vector, 1);
3123 return rx_pkt_total;
3126 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3127 struct hnae3_ring_chain_node *head)
3129 struct pci_dev *pdev = tqp_vector->handle->pdev;
3130 struct hnae3_ring_chain_node *cur_chain = head;
3131 struct hnae3_ring_chain_node *chain;
3132 struct hns3_enet_ring *tx_ring;
3133 struct hns3_enet_ring *rx_ring;
3135 tx_ring = tqp_vector->tx_group.ring;
3137 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
3138 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3139 HNAE3_RING_TYPE_TX);
3140 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3141 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
3143 cur_chain->next = NULL;
3145 while (tx_ring->next) {
3146 tx_ring = tx_ring->next;
3148 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
3151 goto err_free_chain;
3153 cur_chain->next = chain;
3154 chain->tqp_index = tx_ring->tqp->tqp_index;
3155 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3156 HNAE3_RING_TYPE_TX);
3157 hnae3_set_field(chain->int_gl_idx,
3158 HNAE3_RING_GL_IDX_M,
3159 HNAE3_RING_GL_IDX_S,
3166 rx_ring = tqp_vector->rx_group.ring;
3167 if (!tx_ring && rx_ring) {
3168 cur_chain->next = NULL;
3169 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
3170 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3171 HNAE3_RING_TYPE_RX);
3172 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3173 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3175 rx_ring = rx_ring->next;
3179 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
3181 goto err_free_chain;
3183 cur_chain->next = chain;
3184 chain->tqp_index = rx_ring->tqp->tqp_index;
3185 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3186 HNAE3_RING_TYPE_RX);
3187 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3188 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3192 rx_ring = rx_ring->next;
3198 cur_chain = head->next;
3200 chain = cur_chain->next;
3201 devm_kfree(&pdev->dev, cur_chain);
3209 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3210 struct hnae3_ring_chain_node *head)
3212 struct pci_dev *pdev = tqp_vector->handle->pdev;
3213 struct hnae3_ring_chain_node *chain_tmp, *chain;
3218 chain_tmp = chain->next;
3219 devm_kfree(&pdev->dev, chain);
3224 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
3225 struct hns3_enet_ring *ring)
3227 ring->next = group->ring;
3233 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
3235 struct pci_dev *pdev = priv->ae_handle->pdev;
3236 struct hns3_enet_tqp_vector *tqp_vector;
3237 int num_vectors = priv->vector_num;
3241 numa_node = dev_to_node(&pdev->dev);
3243 for (vector_i = 0; vector_i < num_vectors; vector_i++) {
3244 tqp_vector = &priv->tqp_vector[vector_i];
3245 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
3246 &tqp_vector->affinity_mask);
3250 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
3252 struct hnae3_ring_chain_node vector_ring_chain;
3253 struct hnae3_handle *h = priv->ae_handle;
3254 struct hns3_enet_tqp_vector *tqp_vector;
3258 hns3_nic_set_cpumask(priv);
3260 for (i = 0; i < priv->vector_num; i++) {
3261 tqp_vector = &priv->tqp_vector[i];
3262 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
3263 tqp_vector->num_tqps = 0;
3266 for (i = 0; i < h->kinfo.num_tqps; i++) {
3267 u16 vector_i = i % priv->vector_num;
3268 u16 tqp_num = h->kinfo.num_tqps;
3270 tqp_vector = &priv->tqp_vector[vector_i];
3272 hns3_add_ring_to_group(&tqp_vector->tx_group,
3273 priv->ring_data[i].ring);
3275 hns3_add_ring_to_group(&tqp_vector->rx_group,
3276 priv->ring_data[i + tqp_num].ring);
3278 priv->ring_data[i].ring->tqp_vector = tqp_vector;
3279 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
3280 tqp_vector->num_tqps++;
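/* Illustrative note (not part of the original source): rings are spread
 * over the vectors round-robin via (i % priv->vector_num), e.g. with 16
 * TQPs and 4 vectors, vector 0 services the tx and rx rings of queues
 * 0, 4, 8 and 12.
 */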
3283 for (i = 0; i < priv->vector_num; i++) {
3284 tqp_vector = &priv->tqp_vector[i];
3286 tqp_vector->rx_group.total_bytes = 0;
3287 tqp_vector->rx_group.total_packets = 0;
3288 tqp_vector->tx_group.total_bytes = 0;
3289 tqp_vector->tx_group.total_packets = 0;
3290 tqp_vector->handle = h;
3292 ret = hns3_get_vector_ring_chain(tqp_vector,
3293 &vector_ring_chain);
3297 ret = h->ae_algo->ops->map_ring_to_vector(h,
3298 tqp_vector->vector_irq, &vector_ring_chain);
3300 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3305 netif_napi_add(priv->netdev, &tqp_vector->napi,
3306 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
3313 netif_napi_del(&priv->tqp_vector[i].napi);
3318 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
3320 #define HNS3_VECTOR_PF_MAX_NUM 64
3322 struct hnae3_handle *h = priv->ae_handle;
3323 struct hns3_enet_tqp_vector *tqp_vector;
3324 struct hnae3_vector_info *vector;
3325 struct pci_dev *pdev = h->pdev;
3326 u16 tqp_num = h->kinfo.num_tqps;
3331 /* The RSS size, number of online CPUs and vector_num should match */
3332 /* Should consider 2P/4P systems later */
3333 vector_num = min_t(u16, num_online_cpus(), tqp_num);
3334 vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
3336 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
3341 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
3343 priv->vector_num = vector_num;
3344 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
3345 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
3347 if (!priv->tqp_vector) {
3352 for (i = 0; i < priv->vector_num; i++) {
3353 tqp_vector = &priv->tqp_vector[i];
3354 tqp_vector->idx = i;
3355 tqp_vector->mask_addr = vector[i].io_addr;
3356 tqp_vector->vector_irq = vector[i].vector;
3357 hns3_vector_gl_rl_init(tqp_vector, priv);
3361 devm_kfree(&pdev->dev, vector);
3365 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
3371 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
3373 struct hnae3_ring_chain_node vector_ring_chain;
3374 struct hnae3_handle *h = priv->ae_handle;
3375 struct hns3_enet_tqp_vector *tqp_vector;
3378 for (i = 0; i < priv->vector_num; i++) {
3379 tqp_vector = &priv->tqp_vector[i];
3381 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
3384 hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
3386 h->ae_algo->ops->unmap_ring_from_vector(h,
3387 tqp_vector->vector_irq, &vector_ring_chain);
3389 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3391 if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
3392 irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
3393 free_irq(tqp_vector->vector_irq, tqp_vector);
3394 tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
3397 hns3_clear_ring_group(&tqp_vector->rx_group);
3398 hns3_clear_ring_group(&tqp_vector->tx_group);
3399 netif_napi_del(&priv->tqp_vector[i].napi);
3403 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
3405 struct hnae3_handle *h = priv->ae_handle;
3406 struct pci_dev *pdev = h->pdev;
3409 for (i = 0; i < priv->vector_num; i++) {
3410 struct hns3_enet_tqp_vector *tqp_vector;
3412 tqp_vector = &priv->tqp_vector[i];
3413 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
3418 devm_kfree(&pdev->dev, priv->tqp_vector);
3422 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
3425 struct hns3_nic_ring_data *ring_data = priv->ring_data;
3426 int queue_num = priv->ae_handle->kinfo.num_tqps;
3427 struct pci_dev *pdev = priv->ae_handle->pdev;
3428 struct hns3_enet_ring *ring;
3431 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3435 if (ring_type == HNAE3_RING_TYPE_TX) {
3436 desc_num = priv->ae_handle->kinfo.num_tx_desc;
3437 ring_data[q->tqp_index].ring = ring;
3438 ring_data[q->tqp_index].queue_index = q->tqp_index;
3439 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3441 desc_num = priv->ae_handle->kinfo.num_rx_desc;
3442 ring_data[q->tqp_index + queue_num].ring = ring;
3443 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
3444 ring->io_base = q->io_base;
3447 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
3451 ring->desc_cb = NULL;
3452 ring->dev = priv->dev;
3453 ring->desc_dma_addr = 0;
3454 ring->buf_size = q->buf_size;
3455 ring->desc_num = desc_num;
3456 ring->next_to_use = 0;
3457 ring->next_to_clean = 0;
3462 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3463 struct hns3_nic_priv *priv)
3467 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3471 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3473 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3480 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3482 struct hnae3_handle *h = priv->ae_handle;
3483 struct pci_dev *pdev = h->pdev;
3486 priv->ring_data = devm_kzalloc(&pdev->dev,
3487 array3_size(h->kinfo.num_tqps,
3488 sizeof(*priv->ring_data),
3491 if (!priv->ring_data)
3494 for (i = 0; i < h->kinfo.num_tqps; i++) {
3495 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3503 devm_kfree(priv->dev, priv->ring_data[i].ring);
3504 devm_kfree(priv->dev,
3505 priv->ring_data[i + h->kinfo.num_tqps].ring);
3508 devm_kfree(&pdev->dev, priv->ring_data);
3509 priv->ring_data = NULL;
3513 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3515 struct hnae3_handle *h = priv->ae_handle;
3518 if (!priv->ring_data)
3521 for (i = 0; i < h->kinfo.num_tqps; i++) {
3522 devm_kfree(priv->dev, priv->ring_data[i].ring);
3523 devm_kfree(priv->dev,
3524 priv->ring_data[i + h->kinfo.num_tqps].ring);
3526 devm_kfree(priv->dev, priv->ring_data);
3527 priv->ring_data = NULL;
3530 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3534 if (ring->desc_num <= 0 || ring->buf_size <= 0)
3537 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
3538 sizeof(ring->desc_cb[0]), GFP_KERNEL);
3539 if (!ring->desc_cb) {
3544 ret = hns3_alloc_desc(ring);
3546 goto out_with_desc_cb;
3548 if (!HNAE3_IS_TX_RING(ring)) {
3549 ret = hns3_alloc_ring_buffers(ring);
3557 hns3_free_desc(ring);
3559 devm_kfree(ring_to_dev(ring), ring->desc_cb);
3560 ring->desc_cb = NULL;
3565 static void hns3_fini_ring(struct hns3_enet_ring *ring)
3567 hns3_free_desc(ring);
3568 devm_kfree(ring_to_dev(ring), ring->desc_cb);
3569 ring->desc_cb = NULL;
3570 ring->next_to_clean = 0;
3571 ring->next_to_use = 0;
3572 ring->pending_buf = 0;
3574 dev_kfree_skb_any(ring->skb);
3579 static int hns3_buf_size2type(u32 buf_size)
3585 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3588 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3591 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3594 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3597 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3600 return bd_size_type;
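/* Illustrative mapping (reconstructed from the assignments above, not part
 * of the original source): a 512/1024/2048/4096 byte buffer selects the
 * matching HNS3_BD_SIZE_*_TYPE, and any other size falls back to
 * HNS3_BD_SIZE_2048_TYPE.
 */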
3603 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3605 dma_addr_t dma = ring->desc_dma_addr;
3606 struct hnae3_queue *q = ring->tqp;
3608 if (!HNAE3_IS_TX_RING(ring)) {
3609 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3611 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3612 (u32)((dma >> 31) >> 1));
3614 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3615 hns3_buf_size2type(ring->buf_size));
3616 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3617 ring->desc_num / 8 - 1);
3620 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3622 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3623 (u32)((dma >> 31) >> 1));
3625 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3626 ring->desc_num / 8 - 1);
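/* Note (added, not part of the original source): the base address is
 * programmed as two 32-bit halves; "(u32)((dma >> 31) >> 1)" is the usual
 * idiom for taking the upper 32 bits without a shift-by-32 warning when
 * dma_addr_t happens to be only 32 bits wide.
 */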
3630 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3632 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3635 for (i = 0; i < HNAE3_MAX_TC; i++) {
3636 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3639 if (!tc_info->enable)
3642 for (j = 0; j < tc_info->tqp_count; j++) {
3643 struct hnae3_queue *q;
3645 q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3646 hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3652 int hns3_init_all_ring(struct hns3_nic_priv *priv)
3654 struct hnae3_handle *h = priv->ae_handle;
3655 int ring_num = h->kinfo.num_tqps * 2;
3659 for (i = 0; i < ring_num; i++) {
3660 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3663 "Alloc ring memory fail! ret=%d\n", ret);
3664 goto out_when_alloc_ring_memory;
3667 u64_stats_init(&priv->ring_data[i].ring->syncp);
3672 out_when_alloc_ring_memory:
3673 for (j = i - 1; j >= 0; j--)
3674 hns3_fini_ring(priv->ring_data[j].ring);
3679 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3681 struct hnae3_handle *h = priv->ae_handle;
3684 for (i = 0; i < h->kinfo.num_tqps; i++) {
3685 hns3_fini_ring(priv->ring_data[i].ring);
3686 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3691 /* Set the MAC address if it is configured, or leave it to the AE driver */
3692 static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3694 struct hns3_nic_priv *priv = netdev_priv(netdev);
3695 struct hnae3_handle *h = priv->ae_handle;
3696 u8 mac_addr_temp[ETH_ALEN];
3699 if (h->ae_algo->ops->get_mac_addr && init) {
3700 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3701 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3704 /* Check if the MAC address is valid, if not get a random one */
3705 if (!is_valid_ether_addr(netdev->dev_addr)) {
3706 eth_hw_addr_random(netdev);
3707 dev_warn(priv->dev, "using random MAC address %pM\n",
3711 if (h->ae_algo->ops->set_mac_addr)
3712 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3717 static int hns3_init_phy(struct net_device *netdev)
3719 struct hnae3_handle *h = hns3_get_handle(netdev);
3722 if (h->ae_algo->ops->mac_connect_phy)
3723 ret = h->ae_algo->ops->mac_connect_phy(h);
3728 static void hns3_uninit_phy(struct net_device *netdev)
3730 struct hnae3_handle *h = hns3_get_handle(netdev);
3732 if (h->ae_algo->ops->mac_disconnect_phy)
3733 h->ae_algo->ops->mac_disconnect_phy(h);
3736 static int hns3_restore_fd_rules(struct net_device *netdev)
3738 struct hnae3_handle *h = hns3_get_handle(netdev);
3741 if (h->ae_algo->ops->restore_fd_rules)
3742 ret = h->ae_algo->ops->restore_fd_rules(h);
3747 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3749 struct hnae3_handle *h = hns3_get_handle(netdev);
3751 if (h->ae_algo->ops->del_all_fd_entries)
3752 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3755 static int hns3_client_start(struct hnae3_handle *handle)
3757 if (!handle->ae_algo->ops->client_start)
3760 return handle->ae_algo->ops->client_start(handle);
3763 static void hns3_client_stop(struct hnae3_handle *handle)
3765 if (!handle->ae_algo->ops->client_stop)
3768 handle->ae_algo->ops->client_stop(handle);
3771 static void hns3_info_show(struct hns3_nic_priv *priv)
3773 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3775 dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
3776 dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
3777 dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
3778 dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
3779 dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
3780 dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
3781 dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
3782 dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
3783 dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
3786 static int hns3_client_init(struct hnae3_handle *handle)
3788 struct pci_dev *pdev = handle->pdev;
3789 u16 alloc_tqps, max_rss_size;
3790 struct hns3_nic_priv *priv;
3791 struct net_device *netdev;
3794 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3796 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3800 priv = netdev_priv(netdev);
3801 priv->dev = &pdev->dev;
3802 priv->netdev = netdev;
3803 priv->ae_handle = handle;
3804 priv->tx_timeout_count = 0;
3805 set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
3807 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
3809 handle->kinfo.netdev = netdev;
3810 handle->priv = (void *)priv;
3812 hns3_init_mac_addr(netdev, true);
3814 hns3_set_default_feature(netdev);
3816 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3817 netdev->priv_flags |= IFF_UNICAST_FLT;
3818 netdev->netdev_ops = &hns3_nic_netdev_ops;
3819 SET_NETDEV_DEV(netdev, &pdev->dev);
3820 hns3_ethtool_set_ops(netdev);
3822 /* Carrier off reporting is important to ethtool even BEFORE open */
3823 netif_carrier_off(netdev);
3825 ret = hns3_get_ring_config(priv);
3828 goto out_get_ring_cfg;
3831 ret = hns3_nic_alloc_vector_data(priv);
3834 goto out_alloc_vector_data;
3837 ret = hns3_nic_init_vector_data(priv);
3840 goto out_init_vector_data;
3843 ret = hns3_init_all_ring(priv);
3846 goto out_init_ring_data;
3849 ret = hns3_init_phy(netdev);
3853 ret = register_netdev(netdev);
3855 dev_err(priv->dev, "probe register netdev fail!\n");
3856 goto out_reg_netdev_fail;
3859 ret = hns3_client_start(handle);
3861 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
3862 goto out_client_start;
3865 hns3_dcbnl_setup(handle);
3867 hns3_dbg_init(handle);
3869 /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
3870 netdev->max_mtu = HNS3_MAX_MTU;
3872 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
3874 if (netif_msg_drv(handle))
3875 hns3_info_show(priv);
3880 unregister_netdev(netdev);
3881 out_reg_netdev_fail:
3882 hns3_uninit_phy(netdev);
3884 hns3_uninit_all_ring(priv);
3886 hns3_nic_uninit_vector_data(priv);
3887 out_init_vector_data:
3888 hns3_nic_dealloc_vector_data(priv);
3889 out_alloc_vector_data:
3890 priv->ring_data = NULL;
3892 priv->ae_handle = NULL;
3893 free_netdev(netdev);
3897 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3899 struct net_device *netdev = handle->kinfo.netdev;
3900 struct hns3_nic_priv *priv = netdev_priv(netdev);
3903 hns3_remove_hw_addr(netdev);
3905 if (netdev->reg_state != NETREG_UNINITIALIZED)
3906 unregister_netdev(netdev);
3908 hns3_client_stop(handle);
3910 hns3_uninit_phy(netdev);
3912 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
3913 netdev_warn(netdev, "already uninitialized\n");
3914 goto out_netdev_free;
3917 hns3_del_all_fd_rules(netdev, true);
3919 hns3_force_clear_all_rx_ring(handle);
3921 hns3_nic_uninit_vector_data(priv);
3923 ret = hns3_nic_dealloc_vector_data(priv);
3925 netdev_err(netdev, "dealloc vector error\n");
3927 ret = hns3_uninit_all_ring(priv);
3929 netdev_err(netdev, "uninit ring error\n");
3931 hns3_put_ring_config(priv);
3933 hns3_dbg_uninit(handle);
3936 free_netdev(netdev);
3939 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3941 struct net_device *netdev = handle->kinfo.netdev;
3947 netif_carrier_on(netdev);
3948 netif_tx_wake_all_queues(netdev);
3949 if (netif_msg_link(handle))
3950 netdev_info(netdev, "link up\n");
3952 netif_carrier_off(netdev);
3953 netif_tx_stop_all_queues(netdev);
3954 if (netif_msg_link(handle))
3955 netdev_info(netdev, "link down\n");
3959 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3961 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3962 struct net_device *ndev = kinfo->netdev;
3964 if (tc > HNAE3_MAX_TC)
3970 return hns3_nic_set_real_num_queue(ndev);
3973 static int hns3_recover_hw_addr(struct net_device *ndev)
3975 struct netdev_hw_addr_list *list;
3976 struct netdev_hw_addr *ha, *tmp;
3979 netif_addr_lock_bh(ndev);
3980 /* go through and sync uc_addr entries to the device */
3982 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3983 ret = hns3_nic_uc_sync(ndev, ha->addr);
3988 /* go through and sync mc_addr entries to the device */
3990 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3991 ret = hns3_nic_mc_sync(ndev, ha->addr);
3997 netif_addr_unlock_bh(ndev);
4001 static void hns3_remove_hw_addr(struct net_device *netdev)
4003 struct netdev_hw_addr_list *list;
4004 struct netdev_hw_addr *ha, *tmp;
4006 hns3_nic_uc_unsync(netdev, netdev->dev_addr);
4008 netif_addr_lock_bh(netdev);
4009 /* go through and unsync uc_addr entries to the device */
4011 list_for_each_entry_safe(ha, tmp, &list->list, list)
4012 hns3_nic_uc_unsync(netdev, ha->addr);
4014 /* go through and unsync mc_addr entries to the device */
4016 list_for_each_entry_safe(ha, tmp, &list->list, list)
4017 if (ha->refcount > 1)
4018 hns3_nic_mc_unsync(netdev, ha->addr);
4020 netif_addr_unlock_bh(netdev);
4023 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
4025 while (ring->next_to_clean != ring->next_to_use) {
4026 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
4027 hns3_free_buffer_detach(ring, ring->next_to_clean);
4028 ring_ptr_move_fw(ring, next_to_clean);
4032 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
4034 struct hns3_desc_cb res_cbs;
4037 while (ring->next_to_use != ring->next_to_clean) {
4038 /* When a buffer is not reused, its memory has been
4039 * freed in hns3_handle_rx_bd or will be freed by the
4040 * stack, so we need to replace the buffer here.
4042 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4043 ret = hns3_reserve_buffer_map(ring, &res_cbs);
4045 u64_stats_update_begin(&ring->syncp);
4046 ring->stats.sw_err_cnt++;
4047 u64_stats_update_end(&ring->syncp);
4048 /* if allocating a new buffer fails, exit directly
4049 * and re-clear in the up flow.
4051 netdev_warn(ring->tqp->handle->kinfo.netdev,
4052 "reserve buffer map failed, ret = %d\n",
4056 hns3_replace_buffer(ring, ring->next_to_use,
4059 ring_ptr_move_fw(ring, next_to_use);
4062 /* Free the pending skb in rx ring */
4064 dev_kfree_skb_any(ring->skb);
4066 ring->pending_buf = 0;
4072 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4074 while (ring->next_to_use != ring->next_to_clean) {
4075 /* When a buffer is not reused, its memory has been
4076 * freed in hns3_handle_rx_bd or will be freed by the
4077 * stack, so we only need to unmap the buffer here.
4079 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4080 hns3_unmap_buffer(ring,
4081 &ring->desc_cb[ring->next_to_use]);
4082 ring->desc_cb[ring->next_to_use].dma = 0;
4085 ring_ptr_move_fw(ring, next_to_use);
4089 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
4091 struct net_device *ndev = h->kinfo.netdev;
4092 struct hns3_nic_priv *priv = netdev_priv(ndev);
4093 struct hns3_enet_ring *ring;
4096 for (i = 0; i < h->kinfo.num_tqps; i++) {
4097 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4098 hns3_force_clear_rx_ring(ring);
4102 static void hns3_clear_all_ring(struct hnae3_handle *h)
4104 struct net_device *ndev = h->kinfo.netdev;
4105 struct hns3_nic_priv *priv = netdev_priv(ndev);
4108 for (i = 0; i < h->kinfo.num_tqps; i++) {
4109 struct netdev_queue *dev_queue;
4110 struct hns3_enet_ring *ring;
4112 ring = priv->ring_data[i].ring;
4113 hns3_clear_tx_ring(ring);
4114 dev_queue = netdev_get_tx_queue(ndev,
4115 priv->ring_data[i].queue_index);
4116 netdev_tx_reset_queue(dev_queue);
4118 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4119 /* Continue to clear other rings even if clearing some
4122 hns3_clear_rx_ring(ring);
4126 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4128 struct net_device *ndev = h->kinfo.netdev;
4129 struct hns3_nic_priv *priv = netdev_priv(ndev);
4130 struct hns3_enet_ring *rx_ring;
4134 for (i = 0; i < h->kinfo.num_tqps; i++) {
4135 ret = h->ae_algo->ops->reset_queue(h, i);
4139 hns3_init_ring_hw(priv->ring_data[i].ring);
4141 /* We need to clear the tx ring here because the self test will
4142 * use the ring and does not go through down before up
4144 hns3_clear_tx_ring(priv->ring_data[i].ring);
4145 priv->ring_data[i].ring->next_to_clean = 0;
4146 priv->ring_data[i].ring->next_to_use = 0;
4148 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4149 hns3_init_ring_hw(rx_ring);
4150 ret = hns3_clear_rx_ring(rx_ring);
4154 /* We cannot know the hardware head and tail when this
4155 * function is called in the reset flow, so reuse all descriptors.
4157 for (j = 0; j < rx_ring->desc_num; j++)
4158 hns3_reuse_buffer(rx_ring, j);
4160 rx_ring->next_to_clean = 0;
4161 rx_ring->next_to_use = 0;
4164 hns3_init_tx_ring_tc(priv);
4169 static void hns3_store_coal(struct hns3_nic_priv *priv)
4171 /* ethtool only supports setting and querying one coalesce
4172 * configuration for now, so save vector 0's coalesce
4173 * configuration here in order to restore it.
4175 memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
4176 sizeof(struct hns3_enet_coalesce));
4177 memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
4178 sizeof(struct hns3_enet_coalesce));
4181 static void hns3_restore_coal(struct hns3_nic_priv *priv)
4183 u16 vector_num = priv->vector_num;
4186 for (i = 0; i < vector_num; i++) {
4187 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
4188 sizeof(struct hns3_enet_coalesce));
4189 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
4190 sizeof(struct hns3_enet_coalesce));
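/* Note (added, not part of the original source): hns3_store_coal() and
 * hns3_restore_coal() bracket the vector teardown/re-init across a reset:
 * the uninit path below saves vector 0's coalesce settings before the
 * vectors are freed, and the init path copies them back onto every newly
 * allocated vector.
 */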
4194 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
4196 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4197 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4198 struct net_device *ndev = kinfo->netdev;
4199 struct hns3_nic_priv *priv = netdev_priv(ndev);
4201 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
4204 /* it is cumbersome for hardware to pick-and-choose entries for deletion
4205 * from table space. Hence, for function reset software intervention is
4206 * required to delete the entries
4208 if (hns3_dev_ongoing_func_reset(ae_dev)) {
4209 hns3_remove_hw_addr(ndev);
4210 hns3_del_all_fd_rules(ndev, false);
4213 if (!netif_running(ndev))
4216 return hns3_nic_net_stop(ndev);
4219 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
4221 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4222 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
4225 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4227 if (netif_running(kinfo->netdev)) {
4228 ret = hns3_nic_net_open(kinfo->netdev);
4230 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4231 netdev_err(kinfo->netdev,
4232 "hns net up fail, ret=%d!\n", ret);
4240 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
4242 struct net_device *netdev = handle->kinfo.netdev;
4243 struct hns3_nic_priv *priv = netdev_priv(netdev);
4246 /* Carrier off reporting is important to ethtool even BEFORE open */
4247 netif_carrier_off(netdev);
4249 ret = hns3_get_ring_config(priv);
4253 ret = hns3_nic_alloc_vector_data(priv);
4257 hns3_restore_coal(priv);
4259 ret = hns3_nic_init_vector_data(priv);
4261 goto err_dealloc_vector;
4263 ret = hns3_init_all_ring(priv);
4265 goto err_uninit_vector;
4267 ret = hns3_client_start(handle);
4269 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
4270 goto err_uninit_ring;
4273 set_bit(HNS3_NIC_STATE_INITED, &priv->state);
4278 hns3_uninit_all_ring(priv);
4280 hns3_nic_uninit_vector_data(priv);
4282 hns3_nic_dealloc_vector_data(priv);
4284 hns3_put_ring_config(priv);
4289 static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
4291 struct net_device *netdev = handle->kinfo.netdev;
4292 bool vlan_filter_enable;
4295 ret = hns3_init_mac_addr(netdev, false);
4299 ret = hns3_recover_hw_addr(netdev);
4303 ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
4307 vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
4308 hns3_enable_vlan_filter(netdev, vlan_filter_enable);
4310 if (handle->ae_algo->ops->restore_vlan_table)
4311 handle->ae_algo->ops->restore_vlan_table(handle);
4313 return hns3_restore_fd_rules(netdev);
4316 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
4318 struct net_device *netdev = handle->kinfo.netdev;
4319 struct hns3_nic_priv *priv = netdev_priv(netdev);
4322 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
4323 netdev_warn(netdev, "already uninitialized\n");
4327 hns3_force_clear_all_rx_ring(handle);
4329 hns3_nic_uninit_vector_data(priv);
4331 hns3_store_coal(priv);
4333 ret = hns3_nic_dealloc_vector_data(priv);
4335 netdev_err(netdev, "dealloc vector error\n");
4337 ret = hns3_uninit_all_ring(priv);
4339 netdev_err(netdev, "uninit ring error\n");
4341 hns3_put_ring_config(priv);
4346 static int hns3_reset_notify(struct hnae3_handle *handle,
4347 enum hnae3_reset_notify_type type)
4352 case HNAE3_UP_CLIENT:
4353 ret = hns3_reset_notify_up_enet(handle);
4355 case HNAE3_DOWN_CLIENT:
4356 ret = hns3_reset_notify_down_enet(handle);
4358 case HNAE3_INIT_CLIENT:
4359 ret = hns3_reset_notify_init_enet(handle);
4361 case HNAE3_UNINIT_CLIENT:
4362 ret = hns3_reset_notify_uninit_enet(handle);
4364 case HNAE3_RESTORE_CLIENT:
4365 ret = hns3_reset_notify_restore_enet(handle);
4374 int hns3_set_channels(struct net_device *netdev,
4375 struct ethtool_channels *ch)
4377 struct hnae3_handle *h = hns3_get_handle(netdev);
4378 struct hnae3_knic_private_info *kinfo = &h->kinfo;
4379 bool rxfh_configured = netif_is_rxfh_configured(netdev);
4380 u32 new_tqp_num = ch->combined_count;
4384 if (ch->rx_count || ch->tx_count)
4387 if (new_tqp_num > hns3_get_max_available_channels(h) ||
4389 dev_err(&netdev->dev,
4390 "Change tqps fail, the tqp range is from 1 to %d",
4391 hns3_get_max_available_channels(h));
4395 if (kinfo->rss_size == new_tqp_num)
4398 ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
4402 ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
4406 org_tqp_num = h->kinfo.num_tqps;
4407 ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
4409 ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
4412 /* If reverting to the old tqp number failed, a fatal error occurred */
4413 dev_err(&netdev->dev,
4414 "Revert to old tqp num fail, ret=%d", ret);
4417 dev_info(&netdev->dev,
4418 "Change tqp num fail, Revert to old tqp num");
4420 ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
4424 return hns3_reset_notify(h, HNAE3_UP_CLIENT);
4427 static const struct hnae3_client_ops client_ops = {
4428 .init_instance = hns3_client_init,
4429 .uninit_instance = hns3_client_uninit,
4430 .link_status_change = hns3_link_status_change,
4431 .setup_tc = hns3_client_setup_tc,
4432 .reset_notify = hns3_reset_notify,
4435 /* hns3_init_module - Driver registration routine
4436 * hns3_init_module is the first routine called when the driver is
4437 * loaded. It registers the hnae3 client and then the PCI driver.
4439 static int __init hns3_init_module(void)
4443 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
4444 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
4446 client.type = HNAE3_CLIENT_KNIC;
4447 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
4450 client.ops = &client_ops;
4452 INIT_LIST_HEAD(&client.node);
4454 hns3_dbg_register_debugfs(hns3_driver_name);
4456 ret = hnae3_register_client(&client);
4458 goto err_reg_client;
4460 ret = pci_register_driver(&hns3_driver);
4462 goto err_reg_driver;
4467 hnae3_unregister_client(&client);
4469 hns3_dbg_unregister_debugfs();
4472 module_init(hns3_init_module);
4474 /* hns3_exit_module - Driver exit cleanup routine
4475 * hns3_exit_module is called just before the driver is removed
4478 static void __exit hns3_exit_module(void)
4480 pci_unregister_driver(&hns3_driver);
4481 hnae3_unregister_client(&client);
4482 hns3_dbg_unregister_debugfs();
4484 module_exit(hns3_exit_module);
4486 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
4487 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4488 MODULE_LICENSE("GPL");
4489 MODULE_ALIAS("pci:hns-nic");
4490 MODULE_VERSION(HNS3_MOD_VERSION);