net: hns3: Fix for vxlan tx checksum bug
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  [platform/kernel/linux-rpi.git]
1 /*
2  * Copyright (c) 2016~2017 Hisilicon Limited.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
14 #include <linux/ip.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
21 #include <net/gre.h>
22 #include <net/pkt_cls.h>
23 #include <net/vxlan.h>
24
25 #include "hnae3.h"
26 #include "hns3_enet.h"
27
28 static void hns3_clear_all_ring(struct hnae3_handle *h);
29 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
30
31 static const char hns3_driver_name[] = "hns3";
32 const char hns3_driver_version[] = VERMAGIC_STRING;
33 static const char hns3_driver_string[] =
34                         "Hisilicon Ethernet Network Driver for Hip08 Family";
35 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
36 static struct hnae3_client client;
37
38 /* hns3_pci_tbl - PCI Device ID Table
39  *
40  * Last entry must be all 0s
41  *
42  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
43  *   Class, Class Mask, private data (not used) }
44  */
45 static const struct pci_device_id hns3_pci_tbl[] = {
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
49          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
51          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
53          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
55          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
56         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
57          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
58         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
59         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
60         /* required last entry */
61         {0, }
62 };
63 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
64
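/* Hard IRQ handler for a TQP vector: it only schedules NAPI; all TX/RX
 * processing is deferred to the NAPI poll context.
 */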
65 static irqreturn_t hns3_irq_handle(int irq, void *dev)
66 {
67         struct hns3_enet_tqp_vector *tqp_vector = dev;
68
69         napi_schedule(&tqp_vector->napi);
70
71         return IRQ_HANDLED;
72 }
73
74 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
75 {
76         struct hns3_enet_tqp_vector *tqp_vectors;
77         unsigned int i;
78
79         for (i = 0; i < priv->vector_num; i++) {
80                 tqp_vectors = &priv->tqp_vector[i];
81
82                 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
83                         continue;
84
85                 /* release the irq resource */
86                 free_irq(tqp_vectors->vector_irq, tqp_vectors);
87                 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
88         }
89 }
90
91 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
92 {
93         struct hns3_enet_tqp_vector *tqp_vectors;
94         int txrx_int_idx = 0;
95         int rx_int_idx = 0;
96         int tx_int_idx = 0;
97         unsigned int i;
98         int ret;
99
100         for (i = 0; i < priv->vector_num; i++) {
101                 tqp_vectors = &priv->tqp_vector[i];
102
103                 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
104                         continue;
105
106                 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
107                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
108                                  "%s-%s-%d", priv->netdev->name, "TxRx",
109                                  txrx_int_idx++);
110                         txrx_int_idx++;
111                 } else if (tqp_vectors->rx_group.ring) {
112                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
113                                  "%s-%s-%d", priv->netdev->name, "Rx",
114                                  rx_int_idx++);
115                 } else if (tqp_vectors->tx_group.ring) {
116                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
117                                  "%s-%s-%d", priv->netdev->name, "Tx",
118                                  tx_int_idx++);
119                 } else {
120                         /* Skip this unused q_vector */
121                         continue;
122                 }
123
124                 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
125
126                 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
127                                   tqp_vectors->name,
128                                        tqp_vectors);
129                 if (ret) {
130                         netdev_err(priv->netdev, "request irq(%d) fail\n",
131                                    tqp_vectors->vector_irq);
132                         return ret;
133                 }
134
135                 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
136         }
137
138         return 0;
139 }
140
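/* Writing 1 to the vector's mask register enables (unmasks) the vector's
 * interrupt and writing 0 masks it; see hns3_vector_enable() and
 * hns3_vector_disable() below.
 */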
141 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
142                                  u32 mask_en)
143 {
144         writel(mask_en, tqp_vector->mask_addr);
145 }
146
147 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
148 {
149         napi_enable(&tqp_vector->napi);
150
151         /* enable vector */
152         hns3_mask_vector_irq(tqp_vector, 1);
153 }
154
155 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
156 {
157         /* disable vector */
158         hns3_mask_vector_irq(tqp_vector, 0);
159
160         disable_irq(tqp_vector->vector_irq);
161         napi_disable(&tqp_vector->napi);
162 }
163
164 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
165                                  u32 rl_value)
166 {
167         u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
168
169         /* This defines the configuration for RL (Interrupt Rate Limiter).
170          * RL limits the rate of interrupts, i.e. the number of interrupts per second.
171          * GL and RL are the two ways to achieve interrupt coalescing.
172          */
173
174         if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
175             !tqp_vector->rx_group.coal.gl_adapt_enable)
176                 /* According to the hardware, the range of rl_reg is
177                  * 0-59 and the unit is 4.
178                  */
179                 rl_reg |=  HNS3_INT_RL_ENABLE_MASK;
180
181         writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
182 }
183
184 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
185                                     u32 gl_value)
186 {
187         u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
188
189         writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
190 }
191
192 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
193                                     u32 gl_value)
194 {
195         u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
196
197         writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
198 }
199
200 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
201                                    struct hns3_nic_priv *priv)
202 {
203         struct hnae3_handle *h = priv->ae_handle;
204
205         /* initialize the configuration for interrupt coalescing.
206          * 1. GL (Interrupt Gap Limiter)
207          * 2. RL (Interrupt Rate Limiter)
208          */
209
210         /* Default: enable interrupt coalescing self-adaptive and GL */
211         tqp_vector->tx_group.coal.gl_adapt_enable = 1;
212         tqp_vector->rx_group.coal.gl_adapt_enable = 1;
213
214         tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
215         tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
216
217         /* Default: disable RL */
218         h->kinfo.int_rl_setting = 0;
219
220         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
221         tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
222         tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
223 }
224
225 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
226                                       struct hns3_nic_priv *priv)
227 {
228         struct hnae3_handle *h = priv->ae_handle;
229
230         hns3_set_vector_coalesce_tx_gl(tqp_vector,
231                                        tqp_vector->tx_group.coal.int_gl);
232         hns3_set_vector_coalesce_rx_gl(tqp_vector,
233                                        tqp_vector->rx_group.coal.int_gl);
234         hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
235 }
236
237 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
238 {
239         struct hnae3_handle *h = hns3_get_handle(netdev);
240         struct hnae3_knic_private_info *kinfo = &h->kinfo;
241         unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
242         int ret;
243
244         ret = netif_set_real_num_tx_queues(netdev, queue_size);
245         if (ret) {
246                 netdev_err(netdev,
247                            "netif_set_real_num_tx_queues fail, ret=%d!\n",
248                            ret);
249                 return ret;
250         }
251
252         ret = netif_set_real_num_rx_queues(netdev, queue_size);
253         if (ret) {
254                 netdev_err(netdev,
255                            "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
256                 return ret;
257         }
258
259         return 0;
260 }
261
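/* Upper bound on usable channels: the smaller of num_tc * max_rss_size and
 * the TQPs the handle already owns plus the free TQPs reported by
 * get_tqps_and_rss_info().
 */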
262 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
263 {
264         u16 free_tqps, max_rss_size, max_tqps;
265
266         h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
267         max_tqps = h->kinfo.num_tc * max_rss_size;
268
269         return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
270 }
271
272 static int hns3_nic_net_up(struct net_device *netdev)
273 {
274         struct hns3_nic_priv *priv = netdev_priv(netdev);
275         struct hnae3_handle *h = priv->ae_handle;
276         int i, j;
277         int ret;
278
279         ret = hns3_nic_reset_all_ring(h);
280         if (ret)
281                 return ret;
282
283         /* get irq resource for all vectors */
284         ret = hns3_nic_init_irq(priv);
285         if (ret) {
286                 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
287                 return ret;
288         }
289
290         /* enable the vectors */
291         for (i = 0; i < priv->vector_num; i++)
292                 hns3_vector_enable(&priv->tqp_vector[i]);
293
294         /* start the ae_dev */
295         ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
296         if (ret)
297                 goto out_start_err;
298
299         clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
300
301         return 0;
302
303 out_start_err:
304         for (j = i - 1; j >= 0; j--)
305                 hns3_vector_disable(&priv->tqp_vector[j]);
306
307         hns3_nic_uninit_irq(priv);
308
309         return ret;
310 }
311
312 static int hns3_nic_net_open(struct net_device *netdev)
313 {
314         struct hns3_nic_priv *priv = netdev_priv(netdev);
315         int ret;
316
317         netif_carrier_off(netdev);
318
319         ret = hns3_nic_set_real_num_queue(netdev);
320         if (ret)
321                 return ret;
322
323         ret = hns3_nic_net_up(netdev);
324         if (ret) {
325                 netdev_err(netdev,
326                            "hns net up fail, ret=%d!\n", ret);
327                 return ret;
328         }
329
330         priv->ae_handle->last_reset_time = jiffies;
331         return 0;
332 }
333
334 static void hns3_nic_net_down(struct net_device *netdev)
335 {
336         struct hns3_nic_priv *priv = netdev_priv(netdev);
337         const struct hnae3_ae_ops *ops;
338         int i;
339
340         if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
341                 return;
342
343         /* disable vectors */
344         for (i = 0; i < priv->vector_num; i++)
345                 hns3_vector_disable(&priv->tqp_vector[i]);
346
347         /* stop ae_dev */
348         ops = priv->ae_handle->ae_algo->ops;
349         if (ops->stop)
350                 ops->stop(priv->ae_handle);
351
352         /* free irq resources */
353         hns3_nic_uninit_irq(priv);
354
355         hns3_clear_all_ring(priv->ae_handle);
356 }
357
358 static int hns3_nic_net_stop(struct net_device *netdev)
359 {
360         netif_tx_stop_all_queues(netdev);
361         netif_carrier_off(netdev);
362
363         hns3_nic_net_down(netdev);
364
365         return 0;
366 }
367
368 static int hns3_nic_uc_sync(struct net_device *netdev,
369                             const unsigned char *addr)
370 {
371         struct hnae3_handle *h = hns3_get_handle(netdev);
372
373         if (h->ae_algo->ops->add_uc_addr)
374                 return h->ae_algo->ops->add_uc_addr(h, addr);
375
376         return 0;
377 }
378
379 static int hns3_nic_uc_unsync(struct net_device *netdev,
380                               const unsigned char *addr)
381 {
382         struct hnae3_handle *h = hns3_get_handle(netdev);
383
384         if (h->ae_algo->ops->rm_uc_addr)
385                 return h->ae_algo->ops->rm_uc_addr(h, addr);
386
387         return 0;
388 }
389
390 static int hns3_nic_mc_sync(struct net_device *netdev,
391                             const unsigned char *addr)
392 {
393         struct hnae3_handle *h = hns3_get_handle(netdev);
394
395         if (h->ae_algo->ops->add_mc_addr)
396                 return h->ae_algo->ops->add_mc_addr(h, addr);
397
398         return 0;
399 }
400
401 static int hns3_nic_mc_unsync(struct net_device *netdev,
402                               const unsigned char *addr)
403 {
404         struct hnae3_handle *h = hns3_get_handle(netdev);
405
406         if (h->ae_algo->ops->rm_mc_addr)
407                 return h->ae_algo->ops->rm_mc_addr(h, addr);
408
409         return 0;
410 }
411
412 static void hns3_nic_set_rx_mode(struct net_device *netdev)
413 {
414         struct hnae3_handle *h = hns3_get_handle(netdev);
415
416         if (h->ae_algo->ops->set_promisc_mode) {
417                 if (netdev->flags & IFF_PROMISC)
418                         h->ae_algo->ops->set_promisc_mode(h, true, true);
419                 else if (netdev->flags & IFF_ALLMULTI)
420                         h->ae_algo->ops->set_promisc_mode(h, false, true);
421                 else
422                         h->ae_algo->ops->set_promisc_mode(h, false, false);
423         }
424         if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
425                 netdev_err(netdev, "sync uc address fail\n");
426         if (netdev->flags & IFF_MULTICAST)
427                 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
428                         netdev_err(netdev, "sync mc address fail\n");
429 }
430
431 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
432                         u16 *mss, u32 *type_cs_vlan_tso)
433 {
434         u32 l4_offset, hdr_len;
435         union l3_hdr_info l3;
436         union l4_hdr_info l4;
437         u32 l4_paylen;
438         int ret;
439
440         if (!skb_is_gso(skb))
441                 return 0;
442
443         ret = skb_cow_head(skb, 0);
444         if (ret)
445                 return ret;
446
447         l3.hdr = skb_network_header(skb);
448         l4.hdr = skb_transport_header(skb);
449
450         /* Software should clear the IPv4 header's checksum field when
451          * TSO is needed.
452          */
453         if (l3.v4->version == 4)
454                 l3.v4->check = 0;
455
456         /* tunnel packet.*/
457         if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
458                                          SKB_GSO_GRE_CSUM |
459                                          SKB_GSO_UDP_TUNNEL |
460                                          SKB_GSO_UDP_TUNNEL_CSUM)) {
461                 if ((!(skb_shinfo(skb)->gso_type &
462                     SKB_GSO_PARTIAL)) &&
463                     (skb_shinfo(skb)->gso_type &
464                     SKB_GSO_UDP_TUNNEL_CSUM)) {
465                         /* Software should clear the UDP checksum
466                          * field when TSO is needed.
467                          */
468                         l4.udp->check = 0;
469                 }
470                 /* reset l3&l4 pointers from outer to inner headers */
471                 l3.hdr = skb_inner_network_header(skb);
472                 l4.hdr = skb_inner_transport_header(skb);
473
474                 /* Software should clear the IPv4 header's checksum field
475                  * when TSO is needed.
476                  */
477                 if (l3.v4->version == 4)
478                         l3.v4->check = 0;
479         }
480
481         /* normal or tunnel packet*/
482         l4_offset = l4.hdr - skb->data;
483         hdr_len = (l4.tcp->doff * 4) + l4_offset;
484
485         /* remove the payload length from the inner pseudo checksum for TSO */
486         l4_paylen = skb->len - l4_offset;
487         csum_replace_by_diff(&l4.tcp->check,
488                              (__force __wsum)htonl(l4_paylen));
489
490         /* find the txbd field values */
491         *paylen = skb->len - hdr_len;
492         hnae_set_bit(*type_cs_vlan_tso,
493                      HNS3_TXD_TSO_B, 1);
494
495         /* get MSS for TSO */
496         *mss = skb_shinfo(skb)->gso_size;
497
498         return 0;
499 }
500
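/* Parse the outer (and, for encapsulated skbs, the inner) IP headers to
 * find the L4 protocol numbers, skipping IPv6 extension headers if present.
 */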
501 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
502                                 u8 *il4_proto)
503 {
504         union {
505                 struct iphdr *v4;
506                 struct ipv6hdr *v6;
507                 unsigned char *hdr;
508         } l3;
509         unsigned char *l4_hdr;
510         unsigned char *exthdr;
511         u8 l4_proto_tmp;
512         __be16 frag_off;
513
514         /* find the outer L3/L4 header pointers */
515         l3.hdr = skb_network_header(skb);
516         l4_hdr = skb_transport_header(skb);
517
518         if (skb->protocol == htons(ETH_P_IPV6)) {
519                 exthdr = l3.hdr + sizeof(*l3.v6);
520                 l4_proto_tmp = l3.v6->nexthdr;
521                 if (l4_hdr != exthdr)
522                         ipv6_skip_exthdr(skb, exthdr - skb->data,
523                                          &l4_proto_tmp, &frag_off);
524         } else if (skb->protocol == htons(ETH_P_IP)) {
525                 l4_proto_tmp = l3.v4->protocol;
526         } else {
527                 return -EINVAL;
528         }
529
530         *ol4_proto = l4_proto_tmp;
531
532         /* tunnel packet */
533         if (!skb->encapsulation) {
534                 *il4_proto = 0;
535                 return 0;
536         }
537
538         /* find the inner L3/L4 header pointers */
539         l3.hdr = skb_inner_network_header(skb);
540         l4_hdr = skb_inner_transport_header(skb);
541
542         if (l3.v6->version == 6) {
543                 exthdr = l3.hdr + sizeof(*l3.v6);
544                 l4_proto_tmp = l3.v6->nexthdr;
545                 if (l4_hdr != exthdr)
546                         ipv6_skip_exthdr(skb, exthdr - skb->data,
547                                          &l4_proto_tmp, &frag_off);
548         } else if (l3.v4->version == 4) {
549                 l4_proto_tmp = l3.v4->protocol;
550         }
551
552         *il4_proto = l4_proto_tmp;
553
554         return 0;
555 }
556
557 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
558                                 u8 il4_proto, u32 *type_cs_vlan_tso,
559                                 u32 *ol_type_vlan_len_msec)
560 {
561         union {
562                 struct iphdr *v4;
563                 struct ipv6hdr *v6;
564                 unsigned char *hdr;
565         } l3;
566         union {
567                 struct tcphdr *tcp;
568                 struct udphdr *udp;
569                 struct gre_base_hdr *gre;
570                 unsigned char *hdr;
571         } l4;
572         unsigned char *l2_hdr;
573         u8 l4_proto = ol4_proto;
574         u32 ol2_len;
575         u32 ol3_len;
576         u32 ol4_len;
577         u32 l2_len;
578         u32 l3_len;
579
580         l3.hdr = skb_network_header(skb);
581         l4.hdr = skb_transport_header(skb);
582
583         /* compute L2 header size for normal packet, defined in 2 Bytes */
584         l2_len = l3.hdr - skb->data;
585         hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
586                        HNS3_TXD_L2LEN_S, l2_len >> 1);
587
588         /* tunnel packet*/
589         if (skb->encapsulation) {
590                 /* compute OL2 header size, defined in 2 Bytes */
591                 ol2_len = l2_len;
592                 hnae_set_field(*ol_type_vlan_len_msec,
593                                HNS3_TXD_L2LEN_M,
594                                HNS3_TXD_L2LEN_S, ol2_len >> 1);
595
596                 /* compute OL3 header size, defined in 4 Bytes */
597                 ol3_len = l4.hdr - l3.hdr;
598                 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
599                                HNS3_TXD_L3LEN_S, ol3_len >> 2);
600
601                 /* MAC in UDP, MAC in GRE (0x6558)*/
602                 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
603                         /* switch MAC header ptr from outer to inner header.*/
604                         l2_hdr = skb_inner_mac_header(skb);
605
606                         /* compute OL4 header size, defined in 4 Bytes. */
607                         ol4_len = l2_hdr - l4.hdr;
608                         hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
609                                        HNS3_TXD_L4LEN_S, ol4_len >> 2);
610
611                         /* switch IP header ptr from outer to inner header */
612                         l3.hdr = skb_inner_network_header(skb);
613
614                         /* compute inner l2 header size, defined in 2 Bytes. */
615                         l2_len = l3.hdr - l2_hdr;
616                         hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
617                                        HNS3_TXD_L2LEN_S, l2_len >> 1);
618                 } else {
619                         /* skb packet type not supported by the hardware;
620                          * the txbd len fields are left unfilled.
621                          */
622                         return;
623                 }
624
625                 /* switch L4 header pointer from outer to inner */
626                 l4.hdr = skb_inner_transport_header(skb);
627
628                 l4_proto = il4_proto;
629         }
630
631         /* compute inner(/normal) L3 header size, defined in 4 Bytes */
632         l3_len = l4.hdr - l3.hdr;
633         hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
634                        HNS3_TXD_L3LEN_S, l3_len >> 2);
635
636         /* compute inner(/normal) L4 header size, defined in 4 Bytes */
637         switch (l4_proto) {
638         case IPPROTO_TCP:
639                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
640                                HNS3_TXD_L4LEN_S, l4.tcp->doff);
641                 break;
642         case IPPROTO_SCTP:
643                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
644                                HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
645                 break;
646         case IPPROTO_UDP:
647                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
648                                HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
649                 break;
650         default:
651                 /* skb packet type not supported by the hardware;
652                  * the txbd len fields are left unfilled.
653                  */
654                 return;
655         }
656 }
657
658 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
659  * and the packet is UDP with the IANA-assigned VXLAN destination
660  * port (4789), the hardware is expected to do the checksum offload,
661  * but it will not do so. Fall back to a software checksum
662  * (skb_checksum_help()) in that case.
663  */
664 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
665 {
666 #define IANA_VXLAN_PORT 4789
667         union {
668                 struct tcphdr *tcp;
669                 struct udphdr *udp;
670                 struct gre_base_hdr *gre;
671                 unsigned char *hdr;
672         } l4;
673
674         l4.hdr = skb_transport_header(skb);
675
676         if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
677                 return false;
678
679         skb_checksum_help(skb);
680
681         return true;
682 }
683
684 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
685                                    u8 il4_proto, u32 *type_cs_vlan_tso,
686                                    u32 *ol_type_vlan_len_msec)
687 {
688         union {
689                 struct iphdr *v4;
690                 struct ipv6hdr *v6;
691                 unsigned char *hdr;
692         } l3;
693         u32 l4_proto = ol4_proto;
694
695         l3.hdr = skb_network_header(skb);
696
697         /* define OL3 type and tunnel type(OL4).*/
698         if (skb->encapsulation) {
699                 /* define outer network header type.*/
700                 if (skb->protocol == htons(ETH_P_IP)) {
701                         if (skb_is_gso(skb))
702                                 hnae_set_field(*ol_type_vlan_len_msec,
703                                                HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
704                                                HNS3_OL3T_IPV4_CSUM);
705                         else
706                                 hnae_set_field(*ol_type_vlan_len_msec,
707                                                HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
708                                                HNS3_OL3T_IPV4_NO_CSUM);
709
710                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
711                         hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
712                                        HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
713                 }
714
715                 /* define tunnel type(OL4).*/
716                 switch (l4_proto) {
717                 case IPPROTO_UDP:
718                         hnae_set_field(*ol_type_vlan_len_msec,
719                                        HNS3_TXD_TUNTYPE_M,
720                                        HNS3_TXD_TUNTYPE_S,
721                                        HNS3_TUN_MAC_IN_UDP);
722                         break;
723                 case IPPROTO_GRE:
724                         hnae_set_field(*ol_type_vlan_len_msec,
725                                        HNS3_TXD_TUNTYPE_M,
726                                        HNS3_TXD_TUNTYPE_S,
727                                        HNS3_TUN_NVGRE);
728                         break;
729                 default:
730                         /* drop tunnel packets the hardware can't handle,
731                          * since it can't calculate the csum when doing TSO.
732                          */
733                         if (skb_is_gso(skb))
734                                 return -EDOM;
735
736                         /* the stack computes the IP header already; fall
737                          * back to a software l4 checksum when not doing TSO.
738                          */
739                         skb_checksum_help(skb);
740                         return 0;
741                 }
742
743                 l3.hdr = skb_inner_network_header(skb);
744                 l4_proto = il4_proto;
745         }
746
747         if (l3.v4->version == 4) {
748                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
749                                HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
750
751                 /* the stack computes the IP header already, the only time we
752                  * need the hardware to recompute it is in the case of TSO.
753                  */
754                 if (skb_is_gso(skb))
755                         hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
756
757                 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
758         } else if (l3.v6->version == 6) {
759                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
760                                HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
761                 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
762         }
763
764         switch (l4_proto) {
765         case IPPROTO_TCP:
766                 hnae_set_field(*type_cs_vlan_tso,
767                                HNS3_TXD_L4T_M,
768                                HNS3_TXD_L4T_S,
769                                HNS3_L4T_TCP);
770                 break;
771         case IPPROTO_UDP:
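                /* hns3_tunnel_csum_bug() handles the case of a
                 * non-encapsulated UDP packet to the VXLAN port (4789):
                 * the checksum is done in software there, so skip the
                 * L4 offload setup for it.
                 */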
772                 if (hns3_tunnel_csum_bug(skb))
773                         break;
774
775                 hnae_set_field(*type_cs_vlan_tso,
776                                HNS3_TXD_L4T_M,
777                                HNS3_TXD_L4T_S,
778                                HNS3_L4T_UDP);
779                 break;
780         case IPPROTO_SCTP:
781                 hnae_set_field(*type_cs_vlan_tso,
782                                HNS3_TXD_L4T_M,
783                                HNS3_TXD_L4T_S,
784                                HNS3_L4T_SCTP);
785                 break;
786         default:
787                 /* drop tunnel packets the hardware can't handle,
788                  * since it can't calculate the csum when doing TSO.
789                  */
790                 if (skb_is_gso(skb))
791                         return -EDOM;
792
793                 /* the stack computes the IP header already; fall back to
794                  * a software l4 checksum when not doing TSO.
795                  */
796                 skb_checksum_help(skb);
797                 return 0;
798         }
799
800         return 0;
801 }
802
803 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
804 {
805         /* Config bd buffer end */
806         hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
807                        HNS3_TXD_BDTYPE_S, 0);
808         hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
809         hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
810         hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
811 }
812
813 static int hns3_fill_desc_vtags(struct sk_buff *skb,
814                                 struct hns3_enet_ring *tx_ring,
815                                 u32 *inner_vlan_flag,
816                                 u32 *out_vlan_flag,
817                                 u16 *inner_vtag,
818                                 u16 *out_vtag)
819 {
820 #define HNS3_TX_VLAN_PRIO_SHIFT 13
821
822         if (skb->protocol == htons(ETH_P_8021Q) &&
823             !(tx_ring->tqp->handle->kinfo.netdev->features &
824             NETIF_F_HW_VLAN_CTAG_TX)) {
825                 /* When HW VLAN acceleration is turned off and the stack
826                  * sets the protocol to 802.1q, the driver just needs to
827                  * set the protocol to the encapsulated ethertype.
828                  */
829                 skb->protocol = vlan_get_protocol(skb);
830                 return 0;
831         }
832
833         if (skb_vlan_tag_present(skb)) {
834                 u16 vlan_tag;
835
836                 vlan_tag = skb_vlan_tag_get(skb);
837                 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
838
839                 /* Per the hardware's strategy, use out_vtag for the
840                  * double-tagged case and inner_vtag for the single-tag case.
841                  */
842                 if (skb->protocol == htons(ETH_P_8021Q)) {
843                         hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
844                         *out_vtag = vlan_tag;
845                 } else {
846                         hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
847                         *inner_vtag = vlan_tag;
848                 }
849         } else if (skb->protocol == htons(ETH_P_8021Q)) {
850                 struct vlan_ethhdr *vhdr;
851                 int rc;
852
853                 rc = skb_cow_head(skb, 0);
854                 if (rc < 0)
855                         return rc;
856                 vhdr = (struct vlan_ethhdr *)skb->data;
857                 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
858                                         << HNS3_TX_VLAN_PRIO_SHIFT);
859         }
860
861         skb->protocol = vlan_get_protocol(skb);
862         return 0;
863 }
864
865 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
866                           int size, dma_addr_t dma, int frag_end,
867                           enum hns_desc_type type)
868 {
869         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
870         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
871         u32 ol_type_vlan_len_msec = 0;
872         u16 bdtp_fe_sc_vld_ra_ri = 0;
873         u32 type_cs_vlan_tso = 0;
874         struct sk_buff *skb;
875         u16 inner_vtag = 0;
876         u16 out_vtag = 0;
877         u32 paylen = 0;
878         u16 mss = 0;
879         __be16 protocol;
880         u8 ol4_proto;
881         u8 il4_proto;
882         int ret;
883
884         /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
885         desc_cb->priv = priv;
886         desc_cb->length = size;
887         desc_cb->dma = dma;
888         desc_cb->type = type;
889
890         /* now, fill the descriptor */
891         desc->addr = cpu_to_le64(dma);
892         desc->tx.send_size = cpu_to_le16((u16)size);
893         hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
894         desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
895
896         if (type == DESC_TYPE_SKB) {
897                 skb = (struct sk_buff *)priv;
898                 paylen = skb->len;
899
900                 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
901                                            &ol_type_vlan_len_msec,
902                                            &inner_vtag, &out_vtag);
903                 if (unlikely(ret))
904                         return ret;
905
906                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
907                         skb_reset_mac_len(skb);
908                         protocol = skb->protocol;
909
910                         ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
911                         if (ret)
912                                 return ret;
913                         hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
914                                             &type_cs_vlan_tso,
915                                             &ol_type_vlan_len_msec);
916                         ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
917                                                       &type_cs_vlan_tso,
918                                                       &ol_type_vlan_len_msec);
919                         if (ret)
920                                 return ret;
921
922                         ret = hns3_set_tso(skb, &paylen, &mss,
923                                            &type_cs_vlan_tso);
924                         if (ret)
925                                 return ret;
926                 }
927
928                 /* Set txbd */
929                 desc->tx.ol_type_vlan_len_msec =
930                         cpu_to_le32(ol_type_vlan_len_msec);
931                 desc->tx.type_cs_vlan_tso_len =
932                         cpu_to_le32(type_cs_vlan_tso);
933                 desc->tx.paylen = cpu_to_le32(paylen);
934                 desc->tx.mss = cpu_to_le16(mss);
935                 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
936                 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
937         }
938
939         /* move ring pointer to next.*/
940         ring_ptr_move_fw(ring, next_to_use);
941
942         return 0;
943 }
944
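/* Like hns3_fill_desc(), but a buffer larger than HNS3_MAX_BD_SIZE is split
 * across several BDs. Only the first BD keeps DESC_TYPE_SKB, the rest are
 * DESC_TYPE_PAGE, and frag_end is set only on the last BD.
 */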
945 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
946                               int size, dma_addr_t dma, int frag_end,
947                               enum hns_desc_type type)
948 {
949         unsigned int frag_buf_num;
950         unsigned int k;
951         int sizeoflast;
952         int ret;
953
954         frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
955         sizeoflast = size % HNS3_MAX_BD_SIZE;
956         sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
957
958         /* When the frag size is bigger than the hardware limit, split this frag */
959         for (k = 0; k < frag_buf_num; k++) {
960                 ret = hns3_fill_desc(ring, priv,
961                                      (k == frag_buf_num - 1) ?
962                                 sizeoflast : HNS3_MAX_BD_SIZE,
963                                 dma + HNS3_MAX_BD_SIZE * k,
964                                 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
965                                 (type == DESC_TYPE_SKB && !k) ?
966                                         DESC_TYPE_SKB : DESC_TYPE_PAGE);
967                 if (ret)
968                         return ret;
969         }
970
971         return 0;
972 }
973
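/* Count the BDs needed for the skb: the linear part plus every fragment,
 * each split into HNS3_MAX_BD_SIZE chunks. Returns -ENOMEM if one fragment
 * needs more than HNS3_MAX_BD_PER_FRAG BDs, -EBUSY if the ring lacks space.
 */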
974 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
975                                    struct hns3_enet_ring *ring)
976 {
977         struct sk_buff *skb = *out_skb;
978         struct skb_frag_struct *frag;
979         int bdnum_for_frag;
980         int frag_num;
981         int buf_num;
982         int size;
983         int i;
984
985         size = skb_headlen(skb);
986         buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
987
988         frag_num = skb_shinfo(skb)->nr_frags;
989         for (i = 0; i < frag_num; i++) {
990                 frag = &skb_shinfo(skb)->frags[i];
991                 size = skb_frag_size(frag);
992                 bdnum_for_frag =
993                         (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
994                 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
995                         return -ENOMEM;
996
997                 buf_num += bdnum_for_frag;
998         }
999
1000         if (buf_num > ring_space(ring))
1001                 return -EBUSY;
1002
1003         *bnum = buf_num;
1004         return 0;
1005 }
1006
1007 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1008                                   struct hns3_enet_ring *ring)
1009 {
1010         struct sk_buff *skb = *out_skb;
1011         int buf_num;
1012
1013         /* No. of segments (plus a header) */
1014         buf_num = skb_shinfo(skb)->nr_frags + 1;
1015
1016         if (buf_num > ring_space(ring))
1017                 return -EBUSY;
1018
1019         *bnum = buf_num;
1020
1021         return 0;
1022 }
1023
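/* Error unwind for the xmit path: walk next_to_use backwards to the saved
 * position, unmapping every descriptor that was filled before the failure.
 */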
1024 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
1025 {
1026         struct device *dev = ring_to_dev(ring);
1027         unsigned int i;
1028
1029         for (i = 0; i < ring->desc_num; i++) {
1030                 /* check if this is where we started */
1031                 if (ring->next_to_use == next_to_use_orig)
1032                         break;
1033
1034                 /* unmap the descriptor dma address */
1035                 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1036                         dma_unmap_single(dev,
1037                                          ring->desc_cb[ring->next_to_use].dma,
1038                                         ring->desc_cb[ring->next_to_use].length,
1039                                         DMA_TO_DEVICE);
1040                 else
1041                         dma_unmap_page(dev,
1042                                        ring->desc_cb[ring->next_to_use].dma,
1043                                        ring->desc_cb[ring->next_to_use].length,
1044                                        DMA_TO_DEVICE);
1045
1046                 /* rollback one */
1047                 ring_ptr_move_bw(ring, next_to_use);
1048         }
1049 }
1050
1051 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1052 {
1053         struct hns3_nic_priv *priv = netdev_priv(netdev);
1054         struct hns3_nic_ring_data *ring_data =
1055                 &tx_ring_data(priv, skb->queue_mapping);
1056         struct hns3_enet_ring *ring = ring_data->ring;
1057         struct device *dev = priv->dev;
1058         struct netdev_queue *dev_queue;
1059         struct skb_frag_struct *frag;
1060         int next_to_use_head;
1061         int next_to_use_frag;
1062         dma_addr_t dma;
1063         int buf_num;
1064         int seg_num;
1065         int size;
1066         int ret;
1067         int i;
1068
1069         /* Prefetch the data used later */
1070         prefetch(skb->data);
1071
1072         switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1073         case -EBUSY:
1074                 u64_stats_update_begin(&ring->syncp);
1075                 ring->stats.tx_busy++;
1076                 u64_stats_update_end(&ring->syncp);
1077
1078                 goto out_net_tx_busy;
1079         case -ENOMEM:
1080                 u64_stats_update_begin(&ring->syncp);
1081                 ring->stats.sw_err_cnt++;
1082                 u64_stats_update_end(&ring->syncp);
1083                 netdev_err(netdev, "no memory to xmit!\n");
1084
1085                 goto out_err_tx_ok;
1086         default:
1087                 break;
1088         }
1089
1090         /* No. of segments (plus a header) */
1091         seg_num = skb_shinfo(skb)->nr_frags + 1;
1092         /* Fill the first part */
1093         size = skb_headlen(skb);
1094
1095         next_to_use_head = ring->next_to_use;
1096
1097         dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1098         if (dma_mapping_error(dev, dma)) {
1099                 netdev_err(netdev, "TX head DMA map failed\n");
1100                 ring->stats.sw_err_cnt++;
1101                 goto out_err_tx_ok;
1102         }
1103
1104         ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1105                            DESC_TYPE_SKB);
1106         if (ret)
1107                 goto head_dma_map_err;
1108
1109         next_to_use_frag = ring->next_to_use;
1110         /* Fill the fragments */
1111         for (i = 1; i < seg_num; i++) {
1112                 frag = &skb_shinfo(skb)->frags[i - 1];
1113                 size = skb_frag_size(frag);
1114                 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1115                 if (dma_mapping_error(dev, dma)) {
1116                         netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1117                         ring->stats.sw_err_cnt++;
1118                         goto frag_dma_map_err;
1119                 }
1120                 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1121                                     seg_num - 1 == i ? 1 : 0,
1122                                     DESC_TYPE_PAGE);
1123
1124                 if (ret)
1125                         goto frag_dma_map_err;
1126         }
1127
1128         /* All BDs are filled; account the bytes and ring the doorbell */
1129         dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1130         netdev_tx_sent_queue(dev_queue, skb->len);
1131
1132         wmb(); /* Commit all data before submit */
1133
1134         hnae_queue_xmit(ring->tqp, buf_num);
1135
1136         return NETDEV_TX_OK;
1137
1138 frag_dma_map_err:
1139         hns_nic_dma_unmap(ring, next_to_use_frag);
1140
1141 head_dma_map_err:
1142         hns_nic_dma_unmap(ring, next_to_use_head);
1143
1144 out_err_tx_ok:
1145         dev_kfree_skb_any(skb);
1146         return NETDEV_TX_OK;
1147
1148 out_net_tx_busy:
1149         netif_stop_subqueue(netdev, ring_data->queue_index);
1150         smp_mb(); /* Commit all data before submit */
1151
1152         return NETDEV_TX_BUSY;
1153 }
1154
1155 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1156 {
1157         struct hnae3_handle *h = hns3_get_handle(netdev);
1158         struct sockaddr *mac_addr = p;
1159         int ret;
1160
1161         if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1162                 return -EADDRNOTAVAIL;
1163
1164         if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1165                 netdev_info(netdev, "already using mac address %pM\n",
1166                             mac_addr->sa_data);
1167                 return 0;
1168         }
1169
1170         ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1171         if (ret) {
1172                 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1173                 return ret;
1174         }
1175
1176         ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1177
1178         return 0;
1179 }
1180
1181 static int hns3_nic_set_features(struct net_device *netdev,
1182                                  netdev_features_t features)
1183 {
1184         netdev_features_t changed = netdev->features ^ features;
1185         struct hns3_nic_priv *priv = netdev_priv(netdev);
1186         struct hnae3_handle *h = priv->ae_handle;
1187         int ret;
1188
1189         if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1190                 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1191                         priv->ops.fill_desc = hns3_fill_desc_tso;
1192                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1193                 } else {
1194                         priv->ops.fill_desc = hns3_fill_desc;
1195                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1196                 }
1197         }
1198
1199         if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1200             h->ae_algo->ops->enable_vlan_filter) {
1201                 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1202                         h->ae_algo->ops->enable_vlan_filter(h, true);
1203                 else
1204                         h->ae_algo->ops->enable_vlan_filter(h, false);
1205         }
1206
1207         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1208             h->ae_algo->ops->enable_hw_strip_rxvtag) {
1209                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1210                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1211                 else
1212                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1213
1214                 if (ret)
1215                         return ret;
1216         }
1217
1218         netdev->features = features;
1219         return 0;
1220 }
1221
1222 static void hns3_nic_get_stats64(struct net_device *netdev,
1223                                  struct rtnl_link_stats64 *stats)
1224 {
1225         struct hns3_nic_priv *priv = netdev_priv(netdev);
1226         int queue_num = priv->ae_handle->kinfo.num_tqps;
1227         struct hnae3_handle *handle = priv->ae_handle;
1228         struct hns3_enet_ring *ring;
1229         unsigned int start;
1230         unsigned int idx;
1231         u64 tx_bytes = 0;
1232         u64 rx_bytes = 0;
1233         u64 tx_pkts = 0;
1234         u64 rx_pkts = 0;
1235         u64 tx_drop = 0;
1236         u64 rx_drop = 0;
1237
1238         if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1239                 return;
1240
1241         handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1242
1243         for (idx = 0; idx < queue_num; idx++) {
1244                 /* fetch the tx stats */
1245                 ring = priv->ring_data[idx].ring;
1246                 do {
1247                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1248                         tx_bytes += ring->stats.tx_bytes;
1249                         tx_pkts += ring->stats.tx_pkts;
1250                         tx_drop += ring->stats.tx_busy;
1251                         tx_drop += ring->stats.sw_err_cnt;
1252                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1253
1254                 /* fetch the rx stats */
1255                 ring = priv->ring_data[idx + queue_num].ring;
1256                 do {
1257                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1258                         rx_bytes += ring->stats.rx_bytes;
1259                         rx_pkts += ring->stats.rx_pkts;
1260                         rx_drop += ring->stats.non_vld_descs;
1261                         rx_drop += ring->stats.err_pkt_len;
1262                         rx_drop += ring->stats.l2_err;
1263                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1264         }
1265
1266         stats->tx_bytes = tx_bytes;
1267         stats->tx_packets = tx_pkts;
1268         stats->rx_bytes = rx_bytes;
1269         stats->rx_packets = rx_pkts;
1270
1271         stats->rx_errors = netdev->stats.rx_errors;
1272         stats->multicast = netdev->stats.multicast;
1273         stats->rx_length_errors = netdev->stats.rx_length_errors;
1274         stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1275         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1276
1277         stats->tx_errors = netdev->stats.tx_errors;
1278         stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1279         stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1280         stats->collisions = netdev->stats.collisions;
1281         stats->rx_over_errors = netdev->stats.rx_over_errors;
1282         stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1283         stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1284         stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1285         stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1286         stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1287         stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1288         stats->tx_window_errors = netdev->stats.tx_window_errors;
1289         stats->rx_compressed = netdev->stats.rx_compressed;
1290         stats->tx_compressed = netdev->stats.tx_compressed;
1291 }
1292
1293 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1294 {
1295         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1296         struct hnae3_handle *h = hns3_get_handle(netdev);
1297         struct hnae3_knic_private_info *kinfo = &h->kinfo;
1298         u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1299         u8 tc = mqprio_qopt->qopt.num_tc;
1300         u16 mode = mqprio_qopt->mode;
1301         u8 hw = mqprio_qopt->qopt.hw;
1302         bool if_running;
1303         unsigned int i;
1304         int ret;
1305
1306         if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1307                mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1308                 return -EOPNOTSUPP;
1309
1310         if (tc > HNAE3_MAX_TC)
1311                 return -EINVAL;
1312
1313         if (!netdev)
1314                 return -EINVAL;
1315
1316         if_running = netif_running(netdev);
1317         if (if_running) {
1318                 hns3_nic_net_stop(netdev);
1319                 msleep(100);
1320         }
1321
1322         ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1323                 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1324         if (ret)
1325                 goto out;
1326
1327         if (tc <= 1) {
1328                 netdev_reset_tc(netdev);
1329         } else {
1330                 ret = netdev_set_num_tc(netdev, tc);
1331                 if (ret)
1332                         goto out;
1333
1334                 for (i = 0; i < HNAE3_MAX_TC; i++) {
1335                         if (!kinfo->tc_info[i].enable)
1336                                 continue;
1337
1338                         netdev_set_tc_queue(netdev,
1339                                             kinfo->tc_info[i].tc,
1340                                             kinfo->tc_info[i].tqp_count,
1341                                             kinfo->tc_info[i].tqp_offset);
1342                 }
1343         }
1344
1345         ret = hns3_nic_set_real_num_queue(netdev);
1346
1347 out:
1348         if (if_running)
1349                 hns3_nic_net_open(netdev);
1350
1351         return ret;
1352 }
1353
1354 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1355                              void *type_data)
1356 {
1357         if (type != TC_SETUP_QDISC_MQPRIO)
1358                 return -EOPNOTSUPP;
1359
1360         return hns3_setup_tc(dev, type_data);
1361 }
1362
1363 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1364                                 __be16 proto, u16 vid)
1365 {
1366         struct hnae3_handle *h = hns3_get_handle(netdev);
1367         struct hns3_nic_priv *priv = netdev_priv(netdev);
1368         int ret = -EIO;
1369
1370         if (h->ae_algo->ops->set_vlan_filter)
1371                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1372
1373         if (!ret)
1374                 set_bit(vid, priv->active_vlans);
1375
1376         return ret;
1377 }
1378
1379 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1380                                  __be16 proto, u16 vid)
1381 {
1382         struct hnae3_handle *h = hns3_get_handle(netdev);
1383         struct hns3_nic_priv *priv = netdev_priv(netdev);
1384         int ret = -EIO;
1385
1386         if (h->ae_algo->ops->set_vlan_filter)
1387                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1388
1389         if (!ret)
1390                 clear_bit(vid, priv->active_vlans);
1391
1392         return ret;
1393 }
1394
1395 static void hns3_restore_vlan(struct net_device *netdev)
1396 {
1397         struct hns3_nic_priv *priv = netdev_priv(netdev);
1398         u16 vid;
1399         int ret;
1400
1401         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1402                 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1403                 if (ret)
1404                         netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
1405                                     vid, ret);
1406         }
1407 }
1408
1409 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1410                                 u8 qos, __be16 vlan_proto)
1411 {
1412         struct hnae3_handle *h = hns3_get_handle(netdev);
1413         int ret = -EIO;
1414
1415         if (h->ae_algo->ops->set_vf_vlan_filter)
1416                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1417                                                    qos, vlan_proto);
1418
1419         return ret;
1420 }
1421
1422 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1423 {
1424         struct hnae3_handle *h = hns3_get_handle(netdev);
1425         bool if_running = netif_running(netdev);
1426         int ret;
1427
1428         if (!h->ae_algo->ops->set_mtu)
1429                 return -EOPNOTSUPP;
1430
1431         /* if this was called with netdev up then bring netdevice down */
1432         if (if_running) {
1433                 (void)hns3_nic_net_stop(netdev);
1434                 msleep(100);
1435         }
1436
1437         ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1438         if (ret) {
1439                 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1440                            ret);
1441                 return ret;
1442         }
1443
1444         netdev->mtu = new_mtu;
1445
1446         /* if the netdev was running earlier, bring it up again */
1447         if (if_running && hns3_nic_net_open(netdev))
1448                 ret = -EINVAL;
1449
1450         return ret;
1451 }
1452
1453 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1454 {
1455         struct hns3_nic_priv *priv = netdev_priv(ndev);
1456         struct hns3_enet_ring *tx_ring = NULL;
1457         int timeout_queue = 0;
1458         int hw_head, hw_tail;
1459         int i;
1460
1461         /* Find the stopped queue the same way the stack does */
1462         for (i = 0; i < ndev->real_num_tx_queues; i++) {
1463                 struct netdev_queue *q;
1464                 unsigned long trans_start;
1465
1466                 q = netdev_get_tx_queue(ndev, i);
1467                 trans_start = q->trans_start;
1468                 if (netif_xmit_stopped(q) &&
1469                     time_after(jiffies,
1470                                (trans_start + ndev->watchdog_timeo))) {
1471                         timeout_queue = i;
1472                         break;
1473                 }
1474         }
1475
1476         if (i == ndev->real_num_tx_queues) {
1477                 netdev_info(ndev,
1478                             "no netdev TX timeout queue found, timeout count: %llu\n",
1479                             priv->tx_timeout_count);
1480                 return false;
1481         }
1482
1483         tx_ring = priv->ring_data[timeout_queue].ring;
1484
1485         hw_head = readl_relaxed(tx_ring->tqp->io_base +
1486                                 HNS3_RING_TX_RING_HEAD_REG);
1487         hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1488                                 HNS3_RING_TX_RING_TAIL_REG);
1489         netdev_info(ndev,
1490                     "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1491                     priv->tx_timeout_count,
1492                     timeout_queue,
1493                     tx_ring->next_to_use,
1494                     tx_ring->next_to_clean,
1495                     hw_head,
1496                     hw_tail,
1497                     readl(tx_ring->tqp_vector->mask_addr));
1498
1499         return true;
1500 }
1501
1502 static void hns3_nic_net_timeout(struct net_device *ndev)
1503 {
1504         struct hns3_nic_priv *priv = netdev_priv(ndev);
1505         struct hnae3_handle *h = priv->ae_handle;
1506
1507         if (!hns3_get_tx_timeo_queue_info(ndev))
1508                 return;
1509
1510         priv->tx_timeout_count++;
1511
1512         if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
1513                 return;
1514
1515         /* request the reset */
1516         if (h->ae_algo->ops->reset_event)
1517                 h->ae_algo->ops->reset_event(h);
1518 }
1519
1520 static const struct net_device_ops hns3_nic_netdev_ops = {
1521         .ndo_open               = hns3_nic_net_open,
1522         .ndo_stop               = hns3_nic_net_stop,
1523         .ndo_start_xmit         = hns3_nic_net_xmit,
1524         .ndo_tx_timeout         = hns3_nic_net_timeout,
1525         .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1526         .ndo_change_mtu         = hns3_nic_change_mtu,
1527         .ndo_set_features       = hns3_nic_set_features,
1528         .ndo_get_stats64        = hns3_nic_get_stats64,
1529         .ndo_setup_tc           = hns3_nic_setup_tc,
1530         .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1531         .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1532         .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1533         .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1534 };
1535
1536 static bool hns3_is_phys_func(struct pci_dev *pdev)
1537 {
1538         u32 dev_id = pdev->device;
1539
1540         switch (dev_id) {
1541         case HNAE3_DEV_ID_GE:
1542         case HNAE3_DEV_ID_25GE:
1543         case HNAE3_DEV_ID_25GE_RDMA:
1544         case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1545         case HNAE3_DEV_ID_50GE_RDMA:
1546         case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1547         case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1548                 return true;
1549         case HNAE3_DEV_ID_100G_VF:
1550         case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1551                 return false;
1552         default:
1553                 dev_warn(&pdev->dev, "unrecognized pci device-id %d\n",
1554                          dev_id);
1555         }
1556
1557         return false;
1558 }
1559
1560 static void hns3_disable_sriov(struct pci_dev *pdev)
1561 {
1562         /* If our VFs are assigned we cannot shut down SR-IOV
1563          * without causing issues, so just leave the hardware
1564          * available but disabled
1565          */
1566         if (pci_vfs_assigned(pdev)) {
1567                 dev_warn(&pdev->dev,
1568                          "disabling driver while VFs are assigned\n");
1569                 return;
1570         }
1571
1572         pci_disable_sriov(pdev);
1573 }
1574
1575 /* hns3_probe - Device initialization routine
1576  * @pdev: PCI device information struct
1577  * @ent: entry in hns3_pci_tbl
1578  *
1579  * hns3_probe initializes a PF identified by a pci_dev structure.
1580  * The OS initialization, configuring of the PF private structure,
1581  * and a hardware reset occur.
1582  *
1583  * Returns 0 on success, negative on failure
1584  */
1585 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1586 {
1587         struct hnae3_ae_dev *ae_dev;
1588         int ret;
1589
1590         ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1591                               GFP_KERNEL);
1592         if (!ae_dev) {
1593                 ret = -ENOMEM;
1594                 return ret;
1595         }
1596
1597         ae_dev->pdev = pdev;
1598         ae_dev->flag = ent->driver_data;
1599         ae_dev->dev_type = HNAE3_DEV_KNIC;
1600         pci_set_drvdata(pdev, ae_dev);
1601
1602         hnae3_register_ae_dev(ae_dev);
1603
1604         return 0;
1605 }
1606
1607 /* hns3_remove - Device removal routine
1608  * @pdev: PCI device information struct
1609  */
1610 static void hns3_remove(struct pci_dev *pdev)
1611 {
1612         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1613
1614         if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1615                 hns3_disable_sriov(pdev);
1616
1617         hnae3_unregister_ae_dev(ae_dev);
1618 }
1619
1620 /**
1621  * hns3_pci_sriov_configure
1622  * @pdev: pointer to a pci_dev structure
1623  * @num_vfs: number of VFs to allocate
1624  *
1625  * Enable or change the number of VFs. Called when the user updates the number
1626  * of VFs in sysfs.
1627  **/
1628 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1629 {
1630         int ret;
1631
1632         if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1633                 dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
1634                 return -EINVAL;
1635         }
1636
1637         if (num_vfs) {
1638                 ret = pci_enable_sriov(pdev, num_vfs);
1639                 if (ret)
1640                         dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1641                 else
1642                         return num_vfs;
1643         } else if (!pci_vfs_assigned(pdev)) {
1644                 pci_disable_sriov(pdev);
1645         } else {
1646                 dev_warn(&pdev->dev,
1647                          "Unable to free VFs because some are assigned to VMs.\n");
1648         }
1649
1650         return 0;
1651 }
1652
1653 static struct pci_driver hns3_driver = {
1654         .name     = hns3_driver_name,
1655         .id_table = hns3_pci_tbl,
1656         .probe    = hns3_probe,
1657         .remove   = hns3_remove,
1658         .sriov_configure = hns3_pci_sriov_configure,
1659 };
1660
1661 /* set default features for the hns3 netdev */
1662 static void hns3_set_default_feature(struct net_device *netdev)
1663 {
1664         netdev->priv_flags |= IFF_UNICAST_FLT;
1665
1666         netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1667                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1668                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1669                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1670                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1671
1672         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1673
1674         netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1675
1676         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1677                 NETIF_F_HW_VLAN_CTAG_FILTER |
1678                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1679                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1680                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1681                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1682                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1683
1684         netdev->vlan_features |=
1685                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1686                 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1687                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1688                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1689                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1690
1691         netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1692                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1693                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1694                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1695                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1696                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1697 }
1698
1699 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1700                              struct hns3_desc_cb *cb)
1701 {
1702         unsigned int order = hnae_page_order(ring);
1703         struct page *p;
1704
1705         p = dev_alloc_pages(order);
1706         if (!p)
1707                 return -ENOMEM;
1708
1709         cb->priv = p;
1710         cb->page_offset = 0;
1711         cb->reuse_flag = 0;
1712         cb->buf  = page_address(p);
1713         cb->length = hnae_page_size(ring);
1714         cb->type = DESC_TYPE_PAGE;
1715
1716         return 0;
1717 }
1718
1719 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1720                              struct hns3_desc_cb *cb)
1721 {
1722         if (cb->type == DESC_TYPE_SKB)
1723                 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1724         else if (!HNAE3_IS_TX_RING(ring))
1725                 put_page((struct page *)cb->priv);
1726         memset(cb, 0, sizeof(*cb));
1727 }
1728
1729 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1730 {
1731         cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1732                                cb->length, ring_to_dma_dir(ring));
1733
1734         if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1735                 return -EIO;
1736
1737         return 0;
1738 }
1739
1740 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1741                               struct hns3_desc_cb *cb)
1742 {
1743         if (cb->type == DESC_TYPE_SKB)
1744                 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1745                                  ring_to_dma_dir(ring));
1746         else
1747                 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1748                                ring_to_dma_dir(ring));
1749 }
1750
1751 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1752 {
1753         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1754         ring->desc[i].addr = 0;
1755 }
1756
1757 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1758 {
1759         struct hns3_desc_cb *cb = &ring->desc_cb[i];
1760
1761         if (!ring->desc_cb[i].dma)
1762                 return;
1763
1764         hns3_buffer_detach(ring, i);
1765         hns3_free_buffer(ring, cb);
1766 }
1767
1768 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1769 {
1770         int i;
1771
1772         for (i = 0; i < ring->desc_num; i++)
1773                 hns3_free_buffer_detach(ring, i);
1774 }
1775
1776 /* free desc along with its attached buffer */
1777 static void hns3_free_desc(struct hns3_enet_ring *ring)
1778 {
1779         hns3_free_buffers(ring);
1780
1781         dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1782                          ring->desc_num * sizeof(ring->desc[0]),
1783                          DMA_BIDIRECTIONAL);
1784         ring->desc_dma_addr = 0;
1785         kfree(ring->desc);
1786         ring->desc = NULL;
1787 }
1788
1789 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1790 {
1791         int size = ring->desc_num * sizeof(ring->desc[0]);
1792
1793         ring->desc = kzalloc(size, GFP_KERNEL);
1794         if (!ring->desc)
1795                 return -ENOMEM;
1796
1797         ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1798                                              size, DMA_BIDIRECTIONAL);
1799         if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1800                 ring->desc_dma_addr = 0;
1801                 kfree(ring->desc);
1802                 ring->desc = NULL;
1803                 return -ENOMEM;
1804         }
1805
1806         return 0;
1807 }
1808
1809 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1810                                    struct hns3_desc_cb *cb)
1811 {
1812         int ret;
1813
1814         ret = hns3_alloc_buffer(ring, cb);
1815         if (ret)
1816                 goto out;
1817
1818         ret = hns3_map_buffer(ring, cb);
1819         if (ret)
1820                 goto out_with_buf;
1821
1822         return 0;
1823
1824 out_with_buf:
1825         hns3_free_buffer(ring, cb);
1826 out:
1827         return ret;
1828 }
1829
1830 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1831 {
1832         int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1833
1834         if (ret)
1835                 return ret;
1836
1837         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1838
1839         return 0;
1840 }
1841
1842 /* Allocate memory for raw packets and map them for DMA */
1843 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1844 {
1845         int i, j, ret;
1846
1847         for (i = 0; i < ring->desc_num; i++) {
1848                 ret = hns3_alloc_buffer_attach(ring, i);
1849                 if (ret)
1850                         goto out_buffer_fail;
1851         }
1852
1853         return 0;
1854
1855 out_buffer_fail:
1856         for (j = i - 1; j >= 0; j--)
1857                 hns3_free_buffer_detach(ring, j);
1858         return ret;
1859 }
1860
1861 /* detach an in-use buffer and replace it with a reserved one */
1862 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1863                                 struct hns3_desc_cb *res_cb)
1864 {
1865         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1866         ring->desc_cb[i] = *res_cb;
1867         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1868         ring->desc[i].rx.bd_base_info = 0;
1869 }
1870
1871 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1872 {
1873         ring->desc_cb[i].reuse_flag = 0;
1874         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1875                 + ring->desc_cb[i].page_offset);
1876         ring->desc[i].rx.bd_base_info = 0;
1877 }
1878
1879 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1880                                       int *pkts)
1881 {
1882         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1883
1884         (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1885         (*bytes) += desc_cb->length;
1886         /* desc_cb will be cleaned after hns3_free_buffer_detach() */
1887         hns3_free_buffer_detach(ring, ring->next_to_clean);
1888
1889         ring_ptr_move_fw(ring, next_to_clean);
1890 }
1891
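     /* The hardware head pointer is only valid if it lies in the half-open
      * interval (next_to_clean, next_to_use], modulo the ring size, i.e.
      * between the descriptors already cleaned and those handed to hardware.
      */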
1892 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1893 {
1894         int u = ring->next_to_use;
1895         int c = ring->next_to_clean;
1896
1897         if (unlikely(h > ring->desc_num))
1898                 return 0;
1899
1900         return u > c ? (h > c && h <= u) : (h > c || h <= u);
1901 }
1902
1903 bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1904 {
1905         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1906         struct netdev_queue *dev_queue;
1907         int bytes, pkts;
1908         int head;
1909
1910         head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1911         rmb(); /* Make sure head is ready before touching any data */
1912
1913         if (is_ring_empty(ring) || head == ring->next_to_clean)
1914                 return true; /* no data to poll */
1915
1916         if (!is_valid_clean_head(ring, head)) {
1917                 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1918                            ring->next_to_use, ring->next_to_clean);
1919
1920                 u64_stats_update_begin(&ring->syncp);
1921                 ring->stats.io_err_cnt++;
1922                 u64_stats_update_end(&ring->syncp);
1923                 return true;
1924         }
1925
1926         bytes = 0;
1927         pkts = 0;
1928         while (head != ring->next_to_clean && budget) {
1929                 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1930                 /* Issue prefetch for next Tx descriptor */
1931                 prefetch(&ring->desc_cb[ring->next_to_clean]);
1932                 budget--;
1933         }
1934
1935         ring->tqp_vector->tx_group.total_bytes += bytes;
1936         ring->tqp_vector->tx_group.total_packets += pkts;
1937
1938         u64_stats_update_begin(&ring->syncp);
1939         ring->stats.tx_bytes += bytes;
1940         ring->stats.tx_pkts += pkts;
1941         u64_stats_update_end(&ring->syncp);
1942
1943         dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1944         netdev_tx_completed_queue(dev_queue, pkts, bytes);
1945
1946         if (unlikely(pkts && netif_carrier_ok(netdev) &&
1947                      (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1948                 /* Make sure that anybody stopping the queue after this
1949                  * sees the new next_to_clean.
1950                  */
1951                 smp_mb();
1952                 if (netif_tx_queue_stopped(dev_queue)) {
1953                         netif_tx_wake_queue(dev_queue);
1954                         ring->stats.restart_queue++;
1955                 }
1956         }
1957
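             /* A non-zero remaining budget means the ring was fully cleaned */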
1958         return !!budget;
1959 }
1960
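     /* Number of descriptors not currently handed to hardware: desc_num
      * minus the in-flight count (next_to_use - next_to_clean), taking
      * ring wrap-around into account.
      */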
1961 static int hns3_desc_unused(struct hns3_enet_ring *ring)
1962 {
1963         int ntc = ring->next_to_clean;
1964         int ntu = ring->next_to_use;
1965
1966         return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1967 }
1968
1969 static void
1970 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1971 {
1972         struct hns3_desc_cb *desc_cb;
1973         struct hns3_desc_cb res_cbs;
1974         int i, ret;
1975
1976         for (i = 0; i < cleand_count; i++) {
1977                 desc_cb = &ring->desc_cb[ring->next_to_use];
1978                 if (desc_cb->reuse_flag) {
1979                         u64_stats_update_begin(&ring->syncp);
1980                         ring->stats.reuse_pg_cnt++;
1981                         u64_stats_update_end(&ring->syncp);
1982
1983                         hns3_reuse_buffer(ring, ring->next_to_use);
1984                 } else {
1985                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
1986                         if (ret) {
1987                                 u64_stats_update_begin(&ring->syncp);
1988                                 ring->stats.sw_err_cnt++;
1989                                 u64_stats_update_end(&ring->syncp);
1990
1991                                 netdev_err(ring->tqp->handle->kinfo.netdev,
1992                                            "hnae reserve buffer map failed.\n");
1993                                 break;
1994                         }
1995                         hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1996                 }
1997
1998                 ring_ptr_move_fw(ring, next_to_use);
1999         }
2000
2001         wmb(); /* Make sure all data has been written before the doorbell is rung */
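             /* Tell hardware how many buffers were refilled */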
2002         writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2003 }
2004
2005 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2006                                 struct hns3_enet_ring *ring, int pull_len,
2007                                 struct hns3_desc_cb *desc_cb)
2008 {
2009         struct hns3_desc *desc;
2010         int truesize, size;
2011         int last_offset;
2012         bool twobufs;
2013
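             /* With sub-8K pages and 2K buffers a page holds exactly two
              * buffers, so reuse simply flips between the two halves of the
              * page; otherwise the offset walks forward through the page.
              */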
2014         twobufs = ((PAGE_SIZE < 8192) &&
2015                 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2016
2017         desc = &ring->desc[ring->next_to_clean];
2018         size = le16_to_cpu(desc->rx.size);
2019
2020         truesize = hnae_buf_size(ring);
2021
2022         if (!twobufs)
2023                 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
2024
2025         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2026                         size - pull_len, truesize);
2027
2028         /* Avoid reusing pages from a remote NUMA node; default to no reuse */
2029         if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2030                 return;
2031
2032         if (twobufs) {
2033                 /* If we are the only owner of the page we can reuse it */
2034                 if (likely(page_count(desc_cb->priv) == 1)) {
2035                         /* Flip page offset to other buffer */
2036                         desc_cb->page_offset ^= truesize;
2037
2038                         desc_cb->reuse_flag = 1;
2039                         /* bump ref count on page before it is given */
2040                         get_page(desc_cb->priv);
2041                 }
2042                 return;
2043         }
2044
2045         /* Move the offset on to the next buffer in the page */
2046         desc_cb->page_offset += truesize;
2047
2048         if (desc_cb->page_offset <= last_offset) {
2049                 desc_cb->reuse_flag = 1;
2050                 /* Bump ref count on page before it is given */
2051                 get_page(desc_cb->priv);
2052         }
2053 }
2054
2055 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2056                              struct hns3_desc *desc)
2057 {
2058         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2059         int l3_type, l4_type;
2060         u32 bd_base_info;
2061         int ol4_type;
2062         u32 l234info;
2063
2064         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2065         l234info = le32_to_cpu(desc->rx.l234_info);
2066
2067         skb->ip_summed = CHECKSUM_NONE;
2068
2069         skb_checksum_none_assert(skb);
2070
2071         if (!(netdev->features & NETIF_F_RXCSUM))
2072                 return;
2073
2074         /* check if hardware has done checksum */
2075         if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2076                 return;
2077
2078         if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
2079                      hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
2080                      hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2081                      hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2082                 netdev_err(netdev, "L3/L4 error pkt\n");
2083                 u64_stats_update_begin(&ring->syncp);
2084                 ring->stats.l3l4_csum_err++;
2085                 u64_stats_update_end(&ring->syncp);
2086
2087                 return;
2088         }
2089
2090         l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
2091                                  HNS3_RXD_L3ID_S);
2092         l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
2093                                  HNS3_RXD_L4ID_S);
2094
2095         ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
2096         switch (ol4_type) {
2097         case HNS3_OL4_TYPE_MAC_IN_UDP:
2098         case HNS3_OL4_TYPE_NVGRE:
2099                 skb->csum_level = 1;
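                     /* fall through */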
2100         case HNS3_OL4_TYPE_NO_TUN:
2101                 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2102                 if (l3_type == HNS3_L3_TYPE_IPV4 ||
2103                     (l3_type == HNS3_L3_TYPE_IPV6 &&
2104                      (l4_type == HNS3_L4_TYPE_UDP ||
2105                       l4_type == HNS3_L4_TYPE_TCP ||
2106                       l4_type == HNS3_L4_TYPE_SCTP)))
2107                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2108                 break;
2109         }
2110 }
2111
2112 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2113 {
2114         napi_gro_receive(&ring->tqp_vector->napi, skb);
2115 }
2116
2117 static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2118                                struct hns3_desc *desc, u32 l234info)
2119 {
2120         struct pci_dev *pdev = ring->tqp->handle->pdev;
2121         u16 vlan_tag;
2122
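             /* Revision 0x20 hardware does not report which tag was stripped,
              * so check the outer tag first and fall back to the inner tag.
              */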
2123         if (pdev->revision == 0x20) {
2124                 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2125                 if (!(vlan_tag & VLAN_VID_MASK))
2126                         vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2127
2128                 return vlan_tag;
2129         }
2130
2131 #define HNS3_STRP_OUTER_VLAN    0x1
2132 #define HNS3_STRP_INNER_VLAN    0x2
2133
2134         switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2135                                HNS3_RXD_STRP_TAGP_S)) {
2136         case HNS3_STRP_OUTER_VLAN:
2137                 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2138                 break;
2139         case HNS3_STRP_INNER_VLAN:
2140                 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2141                 break;
2142         default:
2143                 vlan_tag = 0;
2144                 break;
2145         }
2146
2147         return vlan_tag;
2148 }
2149
2150 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2151                              struct sk_buff **out_skb, int *out_bnum)
2152 {
2153         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2154         struct hns3_desc_cb *desc_cb;
2155         struct hns3_desc *desc;
2156         struct sk_buff *skb;
2157         unsigned char *va;
2158         u32 bd_base_info;
2159         int pull_len;
2160         u32 l234info;
2161         int length;
2162         int bnum;
2163
2164         desc = &ring->desc[ring->next_to_clean];
2165         desc_cb = &ring->desc_cb[ring->next_to_clean];
2166
2167         prefetch(desc);
2168
2169         length = le16_to_cpu(desc->rx.size);
2170         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2171
2172         /* Check valid BD */
2173         if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
2174                 return -EFAULT;
2175
2176         va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2177
2178         /* Prefetch the first cache line of the first page.
2179          * The idea is to cache a few bytes of the packet header. With a
2180          * 64B L1 cache line we need to prefetch twice to cover 128B, but
2181          * some platforms have larger 128B L1 cache lines. In that case a
2182          * single prefetch suffices to cache the relevant part of the
2183          * header.
2184          */
2185         prefetch(va);
2186 #if L1_CACHE_BYTES < 128
2187         prefetch(va + L1_CACHE_BYTES);
2188 #endif
2189
2190         skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2191                                         HNS3_RX_HEAD_SIZE);
2192         if (unlikely(!skb)) {
2193                 netdev_err(netdev, "alloc rx skb fail\n");
2194
2195                 u64_stats_update_begin(&ring->syncp);
2196                 ring->stats.sw_err_cnt++;
2197                 u64_stats_update_end(&ring->syncp);
2198
2199                 return -ENOMEM;
2200         }
2201
2202         prefetchw(skb->data);
2203
2204         bnum = 1;
2205         if (length <= HNS3_RX_HEAD_SIZE) {
2206                 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2207
2208                 /* We can reuse buffer as-is, just make sure it is local */
2209                 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2210                         desc_cb->reuse_flag = 1;
2211                 else /* This page cannot be reused so discard it */
2212                         put_page(desc_cb->priv);
2213
2214                 ring_ptr_move_fw(ring, next_to_clean);
2215         } else {
2216                 u64_stats_update_begin(&ring->syncp);
2217                 ring->stats.seg_pkt_cnt++;
2218                 u64_stats_update_end(&ring->syncp);
2219
2220                 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2221
2222                 memcpy(__skb_put(skb, pull_len), va,
2223                        ALIGN(pull_len, sizeof(long)));
2224
2225                 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2226                 ring_ptr_move_fw(ring, next_to_clean);
2227
2228                 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2229                         desc = &ring->desc[ring->next_to_clean];
2230                         desc_cb = &ring->desc_cb[ring->next_to_clean];
2231                         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2232                         hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2233                         ring_ptr_move_fw(ring, next_to_clean);
2234                         bnum++;
2235                 }
2236         }
2237
2238         *out_bnum = bnum;
2239
2240         l234info = le32_to_cpu(desc->rx.l234_info);
2241
2242         /* Based on hw strategy, the stripped tag is stored in ot_vlan_tag
2243          * when the packet carries two VLAN tags, and in vlan_tag when it
2244          * carries a single tag.
2245          */
2246         if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2247                 u16 vlan_tag;
2248
2249                 vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info);
2250                 if (vlan_tag & VLAN_VID_MASK)
2251                         __vlan_hwaccel_put_tag(skb,
2252                                                htons(ETH_P_8021Q),
2253                                                vlan_tag);
2254         }
2255
2256         if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2257                 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2258                            ((u64 *)desc)[0], ((u64 *)desc)[1]);
2259                 u64_stats_update_begin(&ring->syncp);
2260                 ring->stats.non_vld_descs++;
2261                 u64_stats_update_end(&ring->syncp);
2262
2263                 dev_kfree_skb_any(skb);
2264                 return -EINVAL;
2265         }
2266
2267         if (unlikely((!desc->rx.pkt_len) ||
2268                      hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2269                 netdev_err(netdev, "truncated pkt\n");
2270                 u64_stats_update_begin(&ring->syncp);
2271                 ring->stats.err_pkt_len++;
2272                 u64_stats_update_end(&ring->syncp);
2273
2274                 dev_kfree_skb_any(skb);
2275                 return -EFAULT;
2276         }
2277
2278         if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2279                 netdev_err(netdev, "L2 error pkt\n");
2280                 u64_stats_update_begin(&ring->syncp);
2281                 ring->stats.l2_err++;
2282                 u64_stats_update_end(&ring->syncp);
2283
2284                 dev_kfree_skb_any(skb);
2285                 return -EFAULT;
2286         }
2287
2288         u64_stats_update_begin(&ring->syncp);
2289         ring->stats.rx_pkts++;
2290         ring->stats.rx_bytes += skb->len;
2291         u64_stats_update_end(&ring->syncp);
2292
2293         ring->tqp_vector->rx_group.total_bytes += skb->len;
2294
2295         hns3_rx_checksum(ring, skb, desc);
2296         return 0;
2297 }
2298
2299 int hns3_clean_rx_ring(
2300                 struct hns3_enet_ring *ring, int budget,
2301                 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2302 {
2303 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2304         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2305         int recv_pkts, recv_bds, clean_count, err;
2306         int unused_count = hns3_desc_unused(ring);
2307         struct sk_buff *skb = NULL;
2308         int num, bnum = 0;
2309
2310         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2311         rmb(); /* Make sure num has taken effect before other data is touched */
2312
2313         recv_pkts = 0, recv_bds = 0, clean_count = 0;
2314         num -= unused_count;
2315
2316         while (recv_pkts < budget && recv_bds < num) {
2317                 /* Reuse or realloc buffers */
2318                 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2319                         hns3_nic_alloc_rx_buffers(ring,
2320                                                   clean_count + unused_count);
2321                         clean_count = 0;
2322                         unused_count = hns3_desc_unused(ring);
2323                 }
2324
2325                 /* Poll one pkt */
2326                 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2327                 if (unlikely(!skb)) /* This fault cannot be repaired */
2328                         goto out;
2329
2330                 recv_bds += bnum;
2331                 clean_count += bnum;
2332                 if (unlikely(err)) {  /* Skip over the erroneous packet */
2333                         recv_pkts++;
2334                         continue;
2335                 }
2336
2337                 /* Hand the packet up to the network stack */
2338                 skb->protocol = eth_type_trans(skb, netdev);
2339                 rx_fn(ring, skb);
2340
2341                 recv_pkts++;
2342         }
2343
2344 out:
2345         /* Refill the ring with the buffers consumed during this poll */
2346         if (clean_count + unused_count > 0)
2347                 hns3_nic_alloc_rx_buffers(ring,
2348                                           clean_count + unused_count);
2349
2350         return recv_pkts;
2351 }
2352
2353 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2354 {
2355         struct hns3_enet_tqp_vector *tqp_vector =
2356                                         ring_group->ring->tqp_vector;
2357         enum hns3_flow_level_range new_flow_level;
2358         int packets_per_msecs;
2359         int bytes_per_msecs;
2360         u32 time_passed_ms;
2361         u16 new_int_gl;
2362
2363         if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
2364                 return false;
2365
2366         if (ring_group->total_packets == 0) {
2367                 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2368                 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2369                 return true;
2370         }
2371
2372         /* Simple throttle rate management
2373          * 0-10 MB/s    low   (50000 ints/s)
2374          * 10-20 MB/s   mid   (20000 ints/s)
2375          * 20-1249 MB/s high  (18000 ints/s)
2376          * > 40000 pps  ultra (8000 ints/s)
2377          */
2378         new_flow_level = ring_group->coal.flow_level;
2379         new_int_gl = ring_group->coal.int_gl;
2380         time_passed_ms =
2381                 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2382
2383         if (!time_passed_ms)
2384                 return false;
2385
2386         do_div(ring_group->total_packets, time_passed_ms);
2387         packets_per_msecs = ring_group->total_packets;
2388
2389         do_div(ring_group->total_bytes, time_passed_ms);
2390         bytes_per_msecs = ring_group->total_bytes;
2391
2392 #define HNS3_RX_LOW_BYTE_RATE 10000
2393 #define HNS3_RX_MID_BYTE_RATE 20000
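     /* byte rates are in bytes per millisecond, e.g. 10000 B/ms == 10 MB/s */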
2394
2395         switch (new_flow_level) {
2396         case HNS3_FLOW_LOW:
2397                 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2398                         new_flow_level = HNS3_FLOW_MID;
2399                 break;
2400         case HNS3_FLOW_MID:
2401                 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2402                         new_flow_level = HNS3_FLOW_HIGH;
2403                 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2404                         new_flow_level = HNS3_FLOW_LOW;
2405                 break;
2406         case HNS3_FLOW_HIGH:
2407         case HNS3_FLOW_ULTRA:
2408         default:
2409                 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2410                         new_flow_level = HNS3_FLOW_MID;
2411                 break;
2412         }
2413
2414 #define HNS3_RX_ULTRA_PACKET_RATE 40
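     /* 40 packets per millisecond, i.e. 40000 pps */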
2415
2416         if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2417             &tqp_vector->rx_group == ring_group)
2418                 new_flow_level = HNS3_FLOW_ULTRA;
2419
2420         switch (new_flow_level) {
2421         case HNS3_FLOW_LOW:
2422                 new_int_gl = HNS3_INT_GL_50K;
2423                 break;
2424         case HNS3_FLOW_MID:
2425                 new_int_gl = HNS3_INT_GL_20K;
2426                 break;
2427         case HNS3_FLOW_HIGH:
2428                 new_int_gl = HNS3_INT_GL_18K;
2429                 break;
2430         case HNS3_FLOW_ULTRA:
2431                 new_int_gl = HNS3_INT_GL_8K;
2432                 break;
2433         default:
2434                 break;
2435         }
2436
2437         ring_group->total_bytes = 0;
2438         ring_group->total_packets = 0;
2439         ring_group->coal.flow_level = new_flow_level;
2440         if (new_int_gl != ring_group->coal.int_gl) {
2441                 ring_group->coal.int_gl = new_int_gl;
2442                 return true;
2443         }
2444         return false;
2445 }
2446
2447 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2448 {
2449         struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2450         struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2451         bool rx_update, tx_update;
2452
2453         if (tqp_vector->int_adapt_down > 0) {
2454                 tqp_vector->int_adapt_down--;
2455                 return;
2456         }
2457
2458         if (rx_group->coal.gl_adapt_enable) {
2459                 rx_update = hns3_get_new_int_gl(rx_group);
2460                 if (rx_update)
2461                         hns3_set_vector_coalesce_rx_gl(tqp_vector,
2462                                                        rx_group->coal.int_gl);
2463         }
2464
2465         if (tx_group->coal.gl_adapt_enable) {
2466                 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2467                 if (tx_update)
2468                         hns3_set_vector_coalesce_tx_gl(tqp_vector,
2469                                                        tx_group->coal.int_gl);
2470         }
2471
2472         tqp_vector->last_jiffies = jiffies;
2473         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
2474 }
2475
2476 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2477 {
2478         struct hns3_enet_ring *ring;
2479         int rx_pkt_total = 0;
2480
2481         struct hns3_enet_tqp_vector *tqp_vector =
2482                 container_of(napi, struct hns3_enet_tqp_vector, napi);
2483         bool clean_complete = true;
2484         int rx_budget;
2485
2486         /* Since the actual Tx work is minimal, we can give the Tx a larger
2487          * budget and be more aggressive about cleaning up the Tx descriptors.
2488          */
2489         hns3_for_each_ring(ring, tqp_vector->tx_group) {
2490                 if (!hns3_clean_tx_ring(ring, budget))
2491                         clean_complete = false;
2492         }
2493
2494         /* make sure the rx ring budget is at least 1 */
2495         rx_budget = max(budget / tqp_vector->num_tqps, 1);
2496
2497         hns3_for_each_ring(ring, tqp_vector->rx_group) {
2498                 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2499                                                     hns3_rx_skb);
2500
2501                 if (rx_cleaned >= rx_budget)
2502                         clean_complete = false;
2503
2504                 rx_pkt_total += rx_cleaned;
2505         }
2506
2507         tqp_vector->rx_group.total_packets += rx_pkt_total;
2508
2509         if (!clean_complete)
2510                 return budget;
2511
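             /* All rings cleaned within budget: finish polling, refresh the
              * adaptive interrupt coalescing setting and re-enable the vector
              * interrupt.
              */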
2512         napi_complete(napi);
2513         hns3_update_new_int_gl(tqp_vector);
2514         hns3_mask_vector_irq(tqp_vector, 1);
2515
2516         return rx_pkt_total;
2517 }
2518
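     /* Build a linked list describing every TX and RX ring served by this
      * vector; the AE layer uses it to program the ring-to-vector mapping.
      */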
2519 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2520                                       struct hnae3_ring_chain_node *head)
2521 {
2522         struct pci_dev *pdev = tqp_vector->handle->pdev;
2523         struct hnae3_ring_chain_node *cur_chain = head;
2524         struct hnae3_ring_chain_node *chain;
2525         struct hns3_enet_ring *tx_ring;
2526         struct hns3_enet_ring *rx_ring;
2527
2528         tx_ring = tqp_vector->tx_group.ring;
2529         if (tx_ring) {
2530                 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2531                 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2532                              HNAE3_RING_TYPE_TX);
2533                 hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2534                                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2535
2536                 cur_chain->next = NULL;
2537
2538                 while (tx_ring->next) {
2539                         tx_ring = tx_ring->next;
2540
2541                         chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2542                                              GFP_KERNEL);
2543                         if (!chain)
2544                                 return -ENOMEM;
2545
2546                         cur_chain->next = chain;
2547                         chain->tqp_index = tx_ring->tqp->tqp_index;
2548                         hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2549                                      HNAE3_RING_TYPE_TX);
2550                         hnae_set_field(chain->int_gl_idx,
2551                                        HNAE3_RING_GL_IDX_M,
2552                                        HNAE3_RING_GL_IDX_S,
2553                                        HNAE3_RING_GL_TX);
2554
2555                         cur_chain = chain;
2556                 }
2557         }
2558
2559         rx_ring = tqp_vector->rx_group.ring;
2560         if (!tx_ring && rx_ring) {
2561                 cur_chain->next = NULL;
2562                 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2563                 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2564                              HNAE3_RING_TYPE_RX);
2565                 hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2566                                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2567
2568                 rx_ring = rx_ring->next;
2569         }
2570
2571         while (rx_ring) {
2572                 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2573                 if (!chain)
2574                         return -ENOMEM;
2575
2576                 cur_chain->next = chain;
2577                 chain->tqp_index = rx_ring->tqp->tqp_index;
2578                 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2579                              HNAE3_RING_TYPE_RX);
2580                 hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2581                                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2582
2583                 cur_chain = chain;
2584
2585                 rx_ring = rx_ring->next;
2586         }
2587
2588         return 0;
2589 }
2590
2591 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2592                                         struct hnae3_ring_chain_node *head)
2593 {
2594         struct pci_dev *pdev = tqp_vector->handle->pdev;
2595         struct hnae3_ring_chain_node *chain_tmp, *chain;
2596
2597         chain = head->next;
2598
2599         while (chain) {
2600                 chain_tmp = chain->next;
2601                 devm_kfree(&pdev->dev, chain);
2602                 chain = chain_tmp;
2603         }
2604 }
2605
2606 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2607                                    struct hns3_enet_ring *ring)
2608 {
2609         ring->next = group->ring;
2610         group->ring = ring;
2611
2612         group->count++;
2613 }
2614
2615 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2616 {
2617         struct hnae3_ring_chain_node vector_ring_chain;
2618         struct hnae3_handle *h = priv->ae_handle;
2619         struct hns3_enet_tqp_vector *tqp_vector;
2620         int ret = 0;
2621         u16 i;
2622
2623         for (i = 0; i < priv->vector_num; i++) {
2624                 tqp_vector = &priv->tqp_vector[i];
2625                 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2626                 tqp_vector->num_tqps = 0;
2627         }
2628
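             /* Assign each TX/RX ring pair to a vector in round-robin order */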
2629         for (i = 0; i < h->kinfo.num_tqps; i++) {
2630                 u16 vector_i = i % priv->vector_num;
2631                 u16 tqp_num = h->kinfo.num_tqps;
2632
2633                 tqp_vector = &priv->tqp_vector[vector_i];
2634
2635                 hns3_add_ring_to_group(&tqp_vector->tx_group,
2636                                        priv->ring_data[i].ring);
2637
2638                 hns3_add_ring_to_group(&tqp_vector->rx_group,
2639                                        priv->ring_data[i + tqp_num].ring);
2640
2641                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2642                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2643                 tqp_vector->num_tqps++;
2644         }
2645
2646         for (i = 0; i < priv->vector_num; i++) {
2647                 tqp_vector = &priv->tqp_vector[i];
2648
2649                 tqp_vector->rx_group.total_bytes = 0;
2650                 tqp_vector->rx_group.total_packets = 0;
2651                 tqp_vector->tx_group.total_bytes = 0;
2652                 tqp_vector->tx_group.total_packets = 0;
2653                 tqp_vector->handle = h;
2654
2655                 ret = hns3_get_vector_ring_chain(tqp_vector,
2656                                                  &vector_ring_chain);
2657                 if (ret)
2658                         return ret;
2659
2660                 ret = h->ae_algo->ops->map_ring_to_vector(h,
2661                         tqp_vector->vector_irq, &vector_ring_chain);
2662
2663                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2664
2665                 if (ret)
2666                         return ret;
2667
2668                 netif_napi_add(priv->netdev, &tqp_vector->napi,
2669                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2670         }
2671
2672         return 0;
2673 }
2674
2675 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2676 {
2677         struct hnae3_handle *h = priv->ae_handle;
2678         struct hns3_enet_tqp_vector *tqp_vector;
2679         struct hnae3_vector_info *vector;
2680         struct pci_dev *pdev = h->pdev;
2681         u16 tqp_num = h->kinfo.num_tqps;
2682         u16 vector_num;
2683         int ret = 0;
2684         u16 i;
2685
2686         /* RSS size, online cpu count and vector_num should be the same */
2687         /* Should consider 2p/4p later */
2688         vector_num = min_t(u16, num_online_cpus(), tqp_num);
2689         vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2690                               GFP_KERNEL);
2691         if (!vector)
2692                 return -ENOMEM;
2693
2694         vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2695
2696         priv->vector_num = vector_num;
2697         priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2698                 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2699                              GFP_KERNEL);
2700         if (!priv->tqp_vector) {
2701                 ret = -ENOMEM;
2702                 goto out;
2703         }
2704
2705         for (i = 0; i < priv->vector_num; i++) {
2706                 tqp_vector = &priv->tqp_vector[i];
2707                 tqp_vector->idx = i;
2708                 tqp_vector->mask_addr = vector[i].io_addr;
2709                 tqp_vector->vector_irq = vector[i].vector;
2710                 hns3_vector_gl_rl_init(tqp_vector, priv);
2711         }
2712
2713 out:
2714         devm_kfree(&pdev->dev, vector);
2715         return ret;
2716 }
2717
2718 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2719 {
2720         group->ring = NULL;
2721         group->count = 0;
2722 }
2723
2724 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2725 {
2726         struct hnae3_ring_chain_node vector_ring_chain;
2727         struct hnae3_handle *h = priv->ae_handle;
2728         struct hns3_enet_tqp_vector *tqp_vector;
2729         int i, ret;
2730
2731         for (i = 0; i < priv->vector_num; i++) {
2732                 tqp_vector = &priv->tqp_vector[i];
2733
2734                 ret = hns3_get_vector_ring_chain(tqp_vector,
2735                                                  &vector_ring_chain);
2736                 if (ret)
2737                         return ret;
2738
2739                 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2740                         tqp_vector->vector_irq, &vector_ring_chain);
2741                 if (ret)
2742                         return ret;
2743
2744                 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2745                 if (ret)
2746                         return ret;
2747
2748                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2749
2750                 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2751                         (void)irq_set_affinity_hint(
2752                                 priv->tqp_vector[i].vector_irq,
2753                                 NULL);
2754                         free_irq(priv->tqp_vector[i].vector_irq,
2755                                  &priv->tqp_vector[i]);
2756                 }
2757
2758                 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2759                 hns3_clear_ring_group(&tqp_vector->rx_group);
2760                 hns3_clear_ring_group(&tqp_vector->tx_group);
2761                 netif_napi_del(&priv->tqp_vector[i].napi);
2762         }
2763
2764         return 0;
2765 }
2766
2767 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2768 {
2769         struct hnae3_handle *h = priv->ae_handle;
2770         struct pci_dev *pdev = h->pdev;
2771         int i, ret;
2772
2773         for (i = 0; i < priv->vector_num; i++) {
2774                 struct hns3_enet_tqp_vector *tqp_vector;
2775
2776                 tqp_vector = &priv->tqp_vector[i];
2777                 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2778                 if (ret)
2779                         return ret;
2780         }
2781
2782         devm_kfree(&pdev->dev, priv->tqp_vector);
2783         return 0;
2784 }
2785
2786 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2787                              int ring_type)
2788 {
2789         struct hns3_nic_ring_data *ring_data = priv->ring_data;
2790         int queue_num = priv->ae_handle->kinfo.num_tqps;
2791         struct pci_dev *pdev = priv->ae_handle->pdev;
2792         struct hns3_enet_ring *ring;
2793
2794         ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2795         if (!ring)
2796                 return -ENOMEM;
2797
2798         if (ring_type == HNAE3_RING_TYPE_TX) {
2799                 ring_data[q->tqp_index].ring = ring;
2800                 ring_data[q->tqp_index].queue_index = q->tqp_index;
2801                 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2802         } else {
2803                 ring_data[q->tqp_index + queue_num].ring = ring;
2804                 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2805                 ring->io_base = q->io_base;
2806         }
2807
2808         hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2809
2810         ring->tqp = q;
2811         ring->desc = NULL;
2812         ring->desc_cb = NULL;
2813         ring->dev = priv->dev;
2814         ring->desc_dma_addr = 0;
2815         ring->buf_size = q->buf_size;
2816         ring->desc_num = q->desc_num;
2817         ring->next_to_use = 0;
2818         ring->next_to_clean = 0;
2819
2820         return 0;
2821 }
2822
2823 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2824                               struct hns3_nic_priv *priv)
2825 {
2826         int ret;
2827
2828         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2829         if (ret)
2830                 return ret;
2831
2832         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2833         if (ret)
2834                 return ret;
2835
2836         return 0;
2837 }
2838
2839 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2840 {
2841         struct hnae3_handle *h = priv->ae_handle;
2842         struct pci_dev *pdev = h->pdev;
2843         int i, ret;
2844
2845         priv->ring_data =  devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2846                                         sizeof(*priv->ring_data) * 2,
2847                                         GFP_KERNEL);
2848         if (!priv->ring_data)
2849                 return -ENOMEM;
2850
2851         for (i = 0; i < h->kinfo.num_tqps; i++) {
2852                 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2853                 if (ret)
2854                         goto err;
2855         }
2856
2857         return 0;
2858 err:
2859         devm_kfree(&pdev->dev, priv->ring_data);
2860         return ret;
2861 }
2862
2863 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2864 {
2865         struct hnae3_handle *h = priv->ae_handle;
2866         int i;
2867
2868         for (i = 0; i < h->kinfo.num_tqps; i++) {
2869                 devm_kfree(priv->dev, priv->ring_data[i].ring);
2870                 devm_kfree(priv->dev,
2871                            priv->ring_data[i + h->kinfo.num_tqps].ring);
2872         }
2873         devm_kfree(priv->dev, priv->ring_data);
2874 }
2875
2876 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2877 {
2878         int ret;
2879
2880         if (ring->desc_num <= 0 || ring->buf_size <= 0)
2881                 return -EINVAL;
2882
2883         ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2884                                 GFP_KERNEL);
2885         if (!ring->desc_cb) {
2886                 ret = -ENOMEM;
2887                 goto out;
2888         }
2889
2890         ret = hns3_alloc_desc(ring);
2891         if (ret)
2892                 goto out_with_desc_cb;
2893
2894         if (!HNAE3_IS_TX_RING(ring)) {
2895                 ret = hns3_alloc_ring_buffers(ring);
2896                 if (ret)
2897                         goto out_with_desc;
2898         }
2899
2900         return 0;
2901
2902 out_with_desc:
2903         hns3_free_desc(ring);
2904 out_with_desc_cb:
2905         kfree(ring->desc_cb);
2906         ring->desc_cb = NULL;
2907 out:
2908         return ret;
2909 }
2910
2911 static void hns3_fini_ring(struct hns3_enet_ring *ring)
2912 {
2913         hns3_free_desc(ring);
2914         kfree(ring->desc_cb);
2915         ring->desc_cb = NULL;
2916         ring->next_to_clean = 0;
2917         ring->next_to_use = 0;
2918 }
2919
2920 static int hns3_buf_size2type(u32 buf_size)
2921 {
2922         int bd_size_type;
2923
2924         switch (buf_size) {
2925         case 512:
2926                 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2927                 break;
2928         case 1024:
2929                 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2930                 break;
2931         case 2048:
2932                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2933                 break;
2934         case 4096:
2935                 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2936                 break;
2937         default:
2938                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2939         }
2940
2941         return bd_size_type;
2942 }
2943
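/* hns3_init_ring_hw - program the ring base address, BD length type and
 * BD number into the TX or RX queue registers. The high word of the DMA
 * address is written as (dma >> 31) >> 1 so the shift stays well defined
 * even when dma_addr_t is only 32 bits wide.
 */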
2944 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2945 {
2946         dma_addr_t dma = ring->desc_dma_addr;
2947         struct hnae3_queue *q = ring->tqp;
2948
2949         if (!HNAE3_IS_TX_RING(ring)) {
2950                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2951                                (u32)dma);
2952                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2953                                (u32)((dma >> 31) >> 1));
2954
2955                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2956                                hns3_buf_size2type(ring->buf_size));
2957                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2958                                ring->desc_num / 8 - 1);
2959
2960         } else {
2961                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2962                                (u32)dma);
2963                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2964                                (u32)((dma >> 31) >> 1));
2965
2966                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2967                                hns3_buf_size2type(ring->buf_size));
2968                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2969                                ring->desc_num / 8 - 1);
2970         }
2971 }
2972
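/* hns3_init_all_ring - allocate descriptor and buffer memory for all
 * 2 * num_tqps rings, tearing down the rings already set up on failure.
 */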
2973 int hns3_init_all_ring(struct hns3_nic_priv *priv)
2974 {
2975         struct hnae3_handle *h = priv->ae_handle;
2976         int ring_num = h->kinfo.num_tqps * 2;
2977         int i, j;
2978         int ret;
2979
2980         for (i = 0; i < ring_num; i++) {
2981                 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2982                 if (ret) {
2983                         dev_err(priv->dev,
2984                                 "Alloc ring memory fail! ret=%d\n", ret);
2985                         goto out_when_alloc_ring_memory;
2986                 }
2987
2988                 u64_stats_init(&priv->ring_data[i].ring->syncp);
2989         }
2990
2991         return 0;
2992
2993 out_when_alloc_ring_memory:
2994         for (j = i - 1; j >= 0; j--)
2995                 hns3_fini_ring(priv->ring_data[j].ring);
2996
2997         return -ENOMEM;
2998 }
2999
3000 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3001 {
3002         struct hnae3_handle *h = priv->ae_handle;
3003         int i;
3004
3005         for (i = 0; i < h->kinfo.num_tqps; i++) {
3006                 if (h->ae_algo->ops->reset_queue)
3007                         h->ae_algo->ops->reset_queue(h, i);
3008
3009                 hns3_fini_ring(priv->ring_data[i].ring);
3010                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3011         }
3012         return 0;
3013 }
3014
3015 /* Set the MAC address if it is configured, or leave it to the AE driver */
3016 static void hns3_init_mac_addr(struct net_device *netdev, bool init)
3017 {
3018         struct hns3_nic_priv *priv = netdev_priv(netdev);
3019         struct hnae3_handle *h = priv->ae_handle;
3020         u8 mac_addr_temp[ETH_ALEN];
3021
3022         if (h->ae_algo->ops->get_mac_addr && init) {
3023                 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3024                 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3025         }
3026
3027         /* Check if the MAC address is valid; if not, get a random one */
3028         if (!is_valid_ether_addr(netdev->dev_addr)) {
3029                 eth_hw_addr_random(netdev);
3030                 dev_warn(priv->dev, "using random MAC address %pM\n",
3031                          netdev->dev_addr);
3032         }
3033
3034         if (h->ae_algo->ops->set_mac_addr)
3035                 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3036
3037 }
3038
3039 static void hns3_uninit_mac_addr(struct net_device *netdev)
3040 {
3041         struct hns3_nic_priv *priv = netdev_priv(netdev);
3042         struct hnae3_handle *h = priv->ae_handle;
3043
3044         if (h->ae_algo->ops->rm_uc_addr)
3045                 h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
3046 }
3047
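/* hns3_nic_set_priv_ops - select the TSO-aware descriptor fill and
 * queue-stop helpers when TSO/TSO6 is enabled, the plain ones otherwise.
 */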
3048 static void hns3_nic_set_priv_ops(struct net_device *netdev)
3049 {
3050         struct hns3_nic_priv *priv = netdev_priv(netdev);
3051
3052         if ((netdev->features & NETIF_F_TSO) ||
3053             (netdev->features & NETIF_F_TSO6)) {
3054                 priv->ops.fill_desc = hns3_fill_desc_tso;
3055                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3056         } else {
3057                 priv->ops.fill_desc = hns3_fill_desc;
3058                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3059         }
3060 }
3061
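/* hns3_client_init - bring up one KNIC instance: allocate the netdev,
 * set up the MAC address, features and ops, fetch the ring configuration,
 * allocate and map the TQP vectors, allocate ring memory and finally
 * register the netdev.
 */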
3062 static int hns3_client_init(struct hnae3_handle *handle)
3063 {
3064         struct pci_dev *pdev = handle->pdev;
3065         struct hns3_nic_priv *priv;
3066         struct net_device *netdev;
3067         int ret;
3068
3069         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
3070                                    hns3_get_max_available_channels(handle));
3071         if (!netdev)
3072                 return -ENOMEM;
3073
3074         priv = netdev_priv(netdev);
3075         priv->dev = &pdev->dev;
3076         priv->netdev = netdev;
3077         priv->ae_handle = handle;
3078         priv->ae_handle->reset_level = HNAE3_NONE_RESET;
3079         priv->ae_handle->last_reset_time = jiffies;
3080         priv->tx_timeout_count = 0;
3081
3082         handle->kinfo.netdev = netdev;
3083         handle->priv = (void *)priv;
3084
3085         hns3_init_mac_addr(netdev, true);
3086
3087         hns3_set_default_feature(netdev);
3088
3089         netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3090         netdev->priv_flags |= IFF_UNICAST_FLT;
3091         netdev->netdev_ops = &hns3_nic_netdev_ops;
3092         SET_NETDEV_DEV(netdev, &pdev->dev);
3093         hns3_ethtool_set_ops(netdev);
3094         hns3_nic_set_priv_ops(netdev);
3095
3096         /* Carrier off reporting is important to ethtool even BEFORE open */
3097         netif_carrier_off(netdev);
3098
3099         ret = hns3_get_ring_config(priv);
3100         if (ret) {
3101                 ret = -ENOMEM;
3102                 goto out_get_ring_cfg;
3103         }
3104
3105         ret = hns3_nic_alloc_vector_data(priv);
3106         if (ret) {
3107                 ret = -ENOMEM;
3108                 goto out_alloc_vector_data;
3109         }
3110
3111         ret = hns3_nic_init_vector_data(priv);
3112         if (ret) {
3113                 ret = -ENOMEM;
3114                 goto out_init_vector_data;
3115         }
3116
3117         ret = hns3_init_all_ring(priv);
3118         if (ret) {
3119                 ret = -ENOMEM;
3120                 goto out_init_ring_data;
3121         }
3122
3123         ret = register_netdev(netdev);
3124         if (ret) {
3125                 dev_err(priv->dev, "probe register netdev fail!\n");
3126                 goto out_reg_netdev_fail;
3127         }
3128
3129         hns3_dcbnl_setup(handle);
3130
3131         /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3132         netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3133
3134         return ret;
3135
3136 out_reg_netdev_fail:
3137 out_init_ring_data:
3138         (void)hns3_nic_uninit_vector_data(priv);
3139 out_init_vector_data:
3140         hns3_nic_dealloc_vector_data(priv);
3141 out_alloc_vector_data:
3142         priv->ring_data = NULL;
3143 out_get_ring_cfg:
3144         priv->ae_handle = NULL;
3145         free_netdev(netdev);
3146         return ret;
3147 }
3148
3149 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3150 {
3151         struct net_device *netdev = handle->kinfo.netdev;
3152         struct hns3_nic_priv *priv = netdev_priv(netdev);
3153         int ret;
3154
3155         if (netdev->reg_state != NETREG_UNINITIALIZED)
3156                 unregister_netdev(netdev);
3157
3158         hns3_force_clear_all_rx_ring(handle);
3159
3160         ret = hns3_nic_uninit_vector_data(priv);
3161         if (ret)
3162                 netdev_err(netdev, "uninit vector error\n");
3163
3164         ret = hns3_nic_dealloc_vector_data(priv);
3165         if (ret)
3166                 netdev_err(netdev, "dealloc vector error\n");
3167
3168         ret = hns3_uninit_all_ring(priv);
3169         if (ret)
3170                 netdev_err(netdev, "uninit ring error\n");
3171
3172         hns3_put_ring_config(priv);
3173
3174         priv->ring_data = NULL;
3175
3176         hns3_uninit_mac_addr(netdev);
3177
3178         free_netdev(netdev);
3179 }
3180
3181 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3182 {
3183         struct net_device *netdev = handle->kinfo.netdev;
3184
3185         if (!netdev)
3186                 return;
3187
3188         if (linkup) {
3189                 netif_carrier_on(netdev);
3190                 netif_tx_wake_all_queues(netdev);
3191                 netdev_info(netdev, "link up\n");
3192         } else {
3193                 netif_carrier_off(netdev);
3194                 netif_tx_stop_all_queues(netdev);
3195                 netdev_info(netdev, "link down\n");
3196         }
3197 }
3198
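/* hns3_client_setup_tc - apply a new TC count: stop the netdev if it is
 * running, let the DCB map_update op reprogram the hardware mapping, then
 * rebuild the netdev TC/queue and prio-to-TC mappings and the real queue
 * count before reopening the netdev.
 */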
3199 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3200 {
3201         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3202         struct net_device *ndev = kinfo->netdev;
3203         bool if_running;
3204         int ret;
3205         u8 i;
3206
3207         if (tc > HNAE3_MAX_TC)
3208                 return -EINVAL;
3209
3210         if (!ndev)
3211                 return -ENODEV;
3212
3213         if_running = netif_running(ndev);
3214
3215         ret = netdev_set_num_tc(ndev, tc);
3216         if (ret)
3217                 return ret;
3218
3219         if (if_running) {
3220                 (void)hns3_nic_net_stop(ndev);
3221                 msleep(100);
3222         }
3223
3224         ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3225                 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3226         if (ret)
3227                 goto err_out;
3228
3229         if (tc <= 1) {
3230                 netdev_reset_tc(ndev);
3231                 goto out;
3232         }
3233
3234         for (i = 0; i < HNAE3_MAX_TC; i++) {
3235                 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3236
3237                 if (tc_info->enable)
3238                         netdev_set_tc_queue(ndev,
3239                                             tc_info->tc,
3240                                             tc_info->tqp_count,
3241                                             tc_info->tqp_offset);
3242         }
3243
3244         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
3245                 netdev_set_prio_tc_map(ndev, i,
3246                                        kinfo->prio_tc[i]);
3247         }
3248
3249 out:
3250         ret = hns3_nic_set_real_num_queue(ndev);
3251
3252 err_out:
3253         if (if_running)
3254                 (void)hns3_nic_net_open(ndev);
3255
3256         return ret;
3257 }
3258
3259 static void hns3_recover_hw_addr(struct net_device *ndev)
3260 {
3261         struct netdev_hw_addr_list *list;
3262         struct netdev_hw_addr *ha, *tmp;
3263
3264         /* go through and sync uc_addr entries to the device */
3265         list = &ndev->uc;
3266         list_for_each_entry_safe(ha, tmp, &list->list, list)
3267                 hns3_nic_uc_sync(ndev, ha->addr);
3268
3269         /* go through and sync mc_addr entries to the device */
3270         list = &ndev->mc;
3271         list_for_each_entry_safe(ha, tmp, &list->list, list)
3272                 hns3_nic_mc_sync(ndev, ha->addr);
3273 }
3274
3275 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3276 {
3277         while (ring->next_to_clean != ring->next_to_use) {
3278                 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3279                 hns3_free_buffer_detach(ring, ring->next_to_clean);
3280                 ring_ptr_move_fw(ring, next_to_clean);
3281         }
3282 }
3283
3284 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3285 {
3286         struct hns3_desc_cb res_cbs;
3287         int ret;
3288
3289         while (ring->next_to_use != ring->next_to_clean) {
3290                 /* When a buffer is not reused, its memory has been
3291                  * freed in hns3_handle_rx_bd or will be freed by the
3292                  * stack, so we need to replace the buffer here.
3293                  */
3294                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3295                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
3296                         if (ret) {
3297                                 u64_stats_update_begin(&ring->syncp);
3298                                 ring->stats.sw_err_cnt++;
3299                                 u64_stats_update_end(&ring->syncp);
3300                                 /* If allocating a new buffer fails, exit
3301                                  * directly and re-clear in the up flow.
3302                                  */
3303                                 netdev_warn(ring->tqp->handle->kinfo.netdev,
3304                                             "reserve buffer map failed, ret = %d\n",
3305                                             ret);
3306                                 return ret;
3307                         }
3308                         hns3_replace_buffer(ring, ring->next_to_use,
3309                                             &res_cbs);
3310                 }
3311                 ring_ptr_move_fw(ring, next_to_use);
3312         }
3313
3314         return 0;
3315 }
3316
3317 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3318 {
3319         while (ring->next_to_use != ring->next_to_clean) {
3320                 /* When a buffer is not reused, its memory has been
3321                  * freed in hns3_handle_rx_bd or will be freed by the
3322                  * stack, so we only need to unmap the buffer here.
3323                  */
3324                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3325                         hns3_unmap_buffer(ring,
3326                                           &ring->desc_cb[ring->next_to_use]);
3327                         ring->desc_cb[ring->next_to_use].dma = 0;
3328                 }
3329
3330                 ring_ptr_move_fw(ring, next_to_use);
3331         }
3332 }
3333
3334 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3335 {
3336         struct net_device *ndev = h->kinfo.netdev;
3337         struct hns3_nic_priv *priv = netdev_priv(ndev);
3338         struct hns3_enet_ring *ring;
3339         u32 i;
3340
3341         for (i = 0; i < h->kinfo.num_tqps; i++) {
3342                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3343                 hns3_force_clear_rx_ring(ring);
3344         }
3345 }
3346
3347 static void hns3_clear_all_ring(struct hnae3_handle *h)
3348 {
3349         struct net_device *ndev = h->kinfo.netdev;
3350         struct hns3_nic_priv *priv = netdev_priv(ndev);
3351         u32 i;
3352
3353         for (i = 0; i < h->kinfo.num_tqps; i++) {
3354                 struct netdev_queue *dev_queue;
3355                 struct hns3_enet_ring *ring;
3356
3357                 ring = priv->ring_data[i].ring;
3358                 hns3_clear_tx_ring(ring);
3359                 dev_queue = netdev_get_tx_queue(ndev,
3360                                                 priv->ring_data[i].queue_index);
3361                 netdev_tx_reset_queue(dev_queue);
3362
3363                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3364                 /* Continue to clear other rings even if clearing some
3365                  * rings failed.
3366                  */
3367                 hns3_clear_rx_ring(ring);
3368         }
3369 }
3370
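/* hns3_nic_reset_all_ring - used in the reset flow: reset every hardware
 * queue, reprogram the ring registers, drop any pending TX descriptors and
 * re-arm every RX descriptor, since the hardware head and tail are unknown.
 */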
3371 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3372 {
3373         struct net_device *ndev = h->kinfo.netdev;
3374         struct hns3_nic_priv *priv = netdev_priv(ndev);
3375         struct hns3_enet_ring *rx_ring;
3376         int i, j;
3377         int ret;
3378
3379         for (i = 0; i < h->kinfo.num_tqps; i++) {
3380                 h->ae_algo->ops->reset_queue(h, i);
3381                 hns3_init_ring_hw(priv->ring_data[i].ring);
3382
3383                 /* We need to clear the TX ring here because the self test
3384                  * uses the ring and does not run "down" before "up".
3385                  */
3386                 hns3_clear_tx_ring(priv->ring_data[i].ring);
3387                 priv->ring_data[i].ring->next_to_clean = 0;
3388                 priv->ring_data[i].ring->next_to_use = 0;
3389
3390                 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3391                 hns3_init_ring_hw(rx_ring);
3392                 ret = hns3_clear_rx_ring(rx_ring);
3393                 if (ret)
3394                         return ret;
3395
3396                 /* We cannot know the hardware head and tail when this
3397                  * function is called in the reset flow, so we reuse all descriptors.
3398                  */
3399                 for (j = 0; j < rx_ring->desc_num; j++)
3400                         hns3_reuse_buffer(rx_ring, j);
3401
3402                 rx_ring->next_to_clean = 0;
3403                 rx_ring->next_to_use = 0;
3404         }
3405
3406         return 0;
3407 }
3408
3409 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3410 {
3411         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3412         struct net_device *ndev = kinfo->netdev;
3413
3414         if (!netif_running(ndev))
3415                 return -EIO;
3416
3417         return hns3_nic_net_stop(ndev);
3418 }
3419
3420 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3421 {
3422         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3423         int ret = 0;
3424
3425         if (netif_running(kinfo->netdev)) {
3426                 ret = hns3_nic_net_up(kinfo->netdev);
3427                 if (ret) {
3428                         netdev_err(kinfo->netdev,
3429                                    "hns net up fail, ret=%d!\n", ret);
3430                         return ret;
3431                 }
3432                 handle->last_reset_time = jiffies;
3433         }
3434
3435         return ret;
3436 }
3437
3438 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3439 {
3440         struct net_device *netdev = handle->kinfo.netdev;
3441         struct hns3_nic_priv *priv = netdev_priv(netdev);
3442         int ret;
3443
3444         hns3_init_mac_addr(netdev, false);
3445         hns3_nic_set_rx_mode(netdev);
3446         hns3_recover_hw_addr(netdev);
3447
3448         /* The hardware table is only cleared when the PF resets */
3449         if (!(handle->flags & HNAE3_SUPPORT_VF))
3450                 hns3_restore_vlan(netdev);
3451
3452         /* Carrier off reporting is important to ethtool even BEFORE open */
3453         netif_carrier_off(netdev);
3454
3455         ret = hns3_get_ring_config(priv);
3456         if (ret)
3457                 return ret;
3458
3459         ret = hns3_nic_init_vector_data(priv);
3460         if (ret)
3461                 return ret;
3462
3463         ret = hns3_init_all_ring(priv);
3464         if (ret) {
3465                 hns3_nic_uninit_vector_data(priv);
3466                 priv->ring_data = NULL;
3467         }
3468
3469         return ret;
3470 }
3471
3472 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3473 {
3474         struct net_device *netdev = handle->kinfo.netdev;
3475         struct hns3_nic_priv *priv = netdev_priv(netdev);
3476         int ret;
3477
3478         hns3_force_clear_all_rx_ring(handle);
3479
3480         ret = hns3_nic_uninit_vector_data(priv);
3481         if (ret) {
3482                 netdev_err(netdev, "uninit vector error\n");
3483                 return ret;
3484         }
3485
3486         ret = hns3_uninit_all_ring(priv);
3487         if (ret)
3488                 netdev_err(netdev, "uninit ring error\n");
3489
3490         hns3_put_ring_config(priv);
3491
3492         priv->ring_data = NULL;
3493
3494         hns3_uninit_mac_addr(netdev);
3495
3496         return ret;
3497 }
3498
3499 static int hns3_reset_notify(struct hnae3_handle *handle,
3500                              enum hnae3_reset_notify_type type)
3501 {
3502         int ret = 0;
3503
3504         switch (type) {
3505         case HNAE3_UP_CLIENT:
3506                 ret = hns3_reset_notify_up_enet(handle);
3507                 break;
3508         case HNAE3_DOWN_CLIENT:
3509                 ret = hns3_reset_notify_down_enet(handle);
3510                 break;
3511         case HNAE3_INIT_CLIENT:
3512                 ret = hns3_reset_notify_init_enet(handle);
3513                 break;
3514         case HNAE3_UNINIT_CLIENT:
3515                 ret = hns3_reset_notify_uninit_enet(handle);
3516                 break;
3517         default:
3518                 break;
3519         }
3520
3521         return ret;
3522 }
3523
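/* hns3_restore_coal - copy the saved TX/RX coalesce parameters back to
 * every vector, e.g. after the vector count has changed.
 */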
3524 static void hns3_restore_coal(struct hns3_nic_priv *priv,
3525                               struct hns3_enet_coalesce *tx,
3526                               struct hns3_enet_coalesce *rx)
3527 {
3528         u16 vector_num = priv->vector_num;
3529         int i;
3530
3531         for (i = 0; i < vector_num; i++) {
3532                 memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
3533                        sizeof(struct hns3_enet_coalesce));
3534                 memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
3535                        sizeof(struct hns3_enet_coalesce));
3536         }
3537 }
3538
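/* hns3_modify_tqp_num - switch the handle to new_tqp_num queues and
 * rebuild the ring and vector state, restoring the saved coalesce
 * settings; unwinds the partial setup on failure.
 */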
3539 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
3540                                struct hns3_enet_coalesce *tx,
3541                                struct hns3_enet_coalesce *rx)
3542 {
3543         struct hns3_nic_priv *priv = netdev_priv(netdev);
3544         struct hnae3_handle *h = hns3_get_handle(netdev);
3545         int ret;
3546
3547         ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3548         if (ret)
3549                 return ret;
3550
3551         ret = hns3_get_ring_config(priv);
3552         if (ret)
3553                 return ret;
3554
3555         ret = hns3_nic_alloc_vector_data(priv);
3556         if (ret)
3557                 goto err_alloc_vector;
3558
3559         hns3_restore_coal(priv, tx, rx);
3560
3561         ret = hns3_nic_init_vector_data(priv);
3562         if (ret)
3563                 goto err_uninit_vector;
3564
3565         ret = hns3_init_all_ring(priv);
3566         if (ret)
3567                 goto err_put_ring;
3568
3569         return 0;
3570
3571 err_put_ring:
3572         hns3_put_ring_config(priv);
3573 err_uninit_vector:
3574         hns3_nic_uninit_vector_data(priv);
3575 err_alloc_vector:
3576         hns3_nic_dealloc_vector_data(priv);
3577         return ret;
3578 }
3579
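/* hns3_adjust_tqps_num - round the requested queue count down to a
 * multiple of the TC number so each TC gets the same number of queues.
 */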
3580 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3581 {
3582         return (new_tqp_num / num_tc) * num_tc;
3583 }
3584
3585 int hns3_set_channels(struct net_device *netdev,
3586                       struct ethtool_channels *ch)
3587 {
3588         struct hns3_nic_priv *priv = netdev_priv(netdev);
3589         struct hnae3_handle *h = hns3_get_handle(netdev);
3590         struct hnae3_knic_private_info *kinfo = &h->kinfo;
3591         struct hns3_enet_coalesce tx_coal, rx_coal;
3592         bool if_running = netif_running(netdev);
3593         u32 new_tqp_num = ch->combined_count;
3594         u16 org_tqp_num;
3595         int ret;
3596
3597         if (ch->rx_count || ch->tx_count)
3598                 return -EINVAL;
3599
3600         if (new_tqp_num > hns3_get_max_available_channels(h) ||
3601             new_tqp_num < kinfo->num_tc) {
3602                 dev_err(&netdev->dev,
3603                         "Change tqps fail, the tqp range is from %d to %d",
3604                         kinfo->num_tc,
3605                         hns3_get_max_available_channels(h));
3606                 return -EINVAL;
3607         }
3608
3609         new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3610         if (kinfo->num_tqps == new_tqp_num)
3611                 return 0;
3612
3613         if (if_running)
3614                 hns3_nic_net_stop(netdev);
3615
3616         ret = hns3_nic_uninit_vector_data(priv);
3617         if (ret) {
3618                 dev_err(&netdev->dev,
3619                         "Unbind vector with tqp fail, nothing is changed");
3620                 goto open_netdev;
3621         }
3622
3623         /* Changing the tqp num may also change the vector num.
3624          * For now ethtool only supports setting and querying one
3625          * coalesce configuration, so save vector 0's coalesce
3626          * configuration here in order to restore it.
3627          */
3628         memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
3629                sizeof(struct hns3_enet_coalesce));
3630         memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
3631                sizeof(struct hns3_enet_coalesce));
3632
3633         hns3_nic_dealloc_vector_data(priv);
3634
3635         hns3_uninit_all_ring(priv);
3636         hns3_put_ring_config(priv);
3637
3638         org_tqp_num = h->kinfo.num_tqps;
3639         ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
3640         if (ret) {
3641                 ret = hns3_modify_tqp_num(netdev, org_tqp_num,
3642                                           &tx_coal, &rx_coal);
3643                 if (ret) {
3644                         /* If reverting to the old tqp num failed, a fatal error occurred */
3645                         dev_err(&netdev->dev,
3646                                 "Revert to old tqp num fail, ret=%d", ret);
3647                         return ret;
3648                 }
3649                 dev_info(&netdev->dev,
3650                          "Change tqp num fail, Revert to old tqp num");
3651         }
3652
3653 open_netdev:
3654         if (if_running)
3655                 hns3_nic_net_open(netdev);
3656
3657         return ret;
3658 }
3659
3660 static const struct hnae3_client_ops client_ops = {
3661         .init_instance = hns3_client_init,
3662         .uninit_instance = hns3_client_uninit,
3663         .link_status_change = hns3_link_status_change,
3664         .setup_tc = hns3_client_setup_tc,
3665         .reset_notify = hns3_reset_notify,
3666 };
3667
3668 /* hns3_init_module - Driver registration routine
3669  * hns3_init_module is the first routine called when the driver is
3670  * loaded. It registers the hnae3 client and the PCI driver.
3671  */
3672 static int __init hns3_init_module(void)
3673 {
3674         int ret;
3675
3676         pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3677         pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3678
3679         client.type = HNAE3_CLIENT_KNIC;
3680         snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3681                  hns3_driver_name);
3682
3683         client.ops = &client_ops;
3684
3685         INIT_LIST_HEAD(&client.node);
3686
3687         ret = hnae3_register_client(&client);
3688         if (ret)
3689                 return ret;
3690
3691         ret = pci_register_driver(&hns3_driver);
3692         if (ret)
3693                 hnae3_unregister_client(&client);
3694
3695         return ret;
3696 }
3697 module_init(hns3_init_module);
3698
3699 /* hns3_exit_module - Driver exit cleanup routine
3700  * hns3_exit_module is called just before the driver is removed
3701  * from memory.
3702  */
3703 static void __exit hns3_exit_module(void)
3704 {
3705         pci_unregister_driver(&hns3_driver);
3706         hnae3_unregister_client(&client);
3707 }
3708 module_exit(hns3_exit_module);
3709
3710 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3711 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3712 MODULE_LICENSE("GPL");
3713 MODULE_ALIAS("pci:hns-nic");
3714 MODULE_VERSION(HNS3_MOD_VERSION);