net: hns3: refactor hns3_get_new_int_gl function
[platform/kernel/linux-rpi.git] drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #ifdef CONFIG_RFS_ACCEL
8 #include <linux/cpu_rmap.h>
9 #endif
10 #include <linux/if_vlan.h>
11 #include <linux/ip.h>
12 #include <linux/ipv6.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/aer.h>
16 #include <linux/skbuff.h>
17 #include <linux/sctp.h>
18 #include <linux/vermagic.h>
19 #include <net/gre.h>
20 #include <net/ip6_checksum.h>
21 #include <net/pkt_cls.h>
22 #include <net/tcp.h>
23 #include <net/vxlan.h>
24
25 #include "hnae3.h"
26 #include "hns3_enet.h"
27
28 #define hns3_set_field(origin, shift, val)      ((origin) |= ((val) << (shift)))
29 #define hns3_tx_bd_count(S)     DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
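/* For illustration: hns3_tx_bd_count() returns how many hardware buffer
 * descriptors (BDs) are needed to cover S bytes at up to HNS3_MAX_BD_SIZE
 * bytes per BD. Assuming HNS3_MAX_BD_SIZE is 65535, a 100000-byte buffer
 * needs DIV_ROUND_UP(100000, 65535) = 2 BDs, while anything up to 65535
 * bytes fits in a single BD.
 */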
30
31 static void hns3_clear_all_ring(struct hnae3_handle *h);
32 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
33 static void hns3_remove_hw_addr(struct net_device *netdev);
34
35 static const char hns3_driver_name[] = "hns3";
36 const char hns3_driver_version[] = VERMAGIC_STRING;
37 static const char hns3_driver_string[] =
38                         "Hisilicon Ethernet Network Driver for Hip08 Family";
39 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
40 static struct hnae3_client client;
41
42 static int debug = -1;
43 module_param(debug, int, 0);
44 MODULE_PARM_DESC(debug, " Network interface message level setting");
45
46 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
47                            NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
48
49 /* hns3_pci_tbl - PCI Device ID Table
50  *
51  * Last entry must be all 0s
52  *
53  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
54  *   Class, Class Mask, private data (not used) }
55  */
56 static const struct pci_device_id hns3_pci_tbl[] = {
57         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
58         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
59         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
60          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
61         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
62          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
63         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
64          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
65         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
66          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
67         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
68          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
69         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
70         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
71          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
72         /* required last entry */
73         {0, }
74 };
75 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
76
77 static irqreturn_t hns3_irq_handle(int irq, void *vector)
78 {
79         struct hns3_enet_tqp_vector *tqp_vector = vector;
80
81         napi_schedule_irqoff(&tqp_vector->napi);
82
83         return IRQ_HANDLED;
84 }
85
86 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
87 {
88         struct hns3_enet_tqp_vector *tqp_vectors;
89         unsigned int i;
90
91         for (i = 0; i < priv->vector_num; i++) {
92                 tqp_vectors = &priv->tqp_vector[i];
93
94                 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
95                         continue;
96
97                 /* clear the affinity mask */
98                 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
99
100                 /* release the irq resource */
101                 free_irq(tqp_vectors->vector_irq, tqp_vectors);
102                 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
103         }
104 }
105
106 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
107 {
108         struct hns3_enet_tqp_vector *tqp_vectors;
109         int txrx_int_idx = 0;
110         int rx_int_idx = 0;
111         int tx_int_idx = 0;
112         unsigned int i;
113         int ret;
114
115         for (i = 0; i < priv->vector_num; i++) {
116                 tqp_vectors = &priv->tqp_vector[i];
117
118                 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
119                         continue;
120
121                 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
122                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
123                                  "%s-%s-%d", priv->netdev->name, "TxRx",
124                                  txrx_int_idx++);
125                         txrx_int_idx++;
126                 } else if (tqp_vectors->rx_group.ring) {
127                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
128                                  "%s-%s-%d", priv->netdev->name, "Rx",
129                                  rx_int_idx++);
130                 } else if (tqp_vectors->tx_group.ring) {
131                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
132                                  "%s-%s-%d", priv->netdev->name, "Tx",
133                                  tx_int_idx++);
134                 } else {
135                         /* Skip this unused q_vector */
136                         continue;
137                 }
138
139                 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
140
141                 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
142                                   tqp_vectors->name,
143                                   tqp_vectors);
144                 if (ret) {
145                         netdev_err(priv->netdev, "request irq(%d) fail\n",
146                                    tqp_vectors->vector_irq);
147                         return ret;
148                 }
149
150                 irq_set_affinity_hint(tqp_vectors->vector_irq,
151                                       &tqp_vectors->affinity_mask);
152
153                 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
154         }
155
156         return 0;
157 }
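
/* For reference, the naming above shows up in /proc/interrupts as entries
 * such as "eth0-TxRx-0" for a combined vector, or "eth0-Rx-0" / "eth0-Tx-0"
 * when a vector carries only one direction ("eth0" is just an example of
 * whatever name the stack assigned to the netdev).
 */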
158
159 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
160                                  u32 mask_en)
161 {
162         writel(mask_en, tqp_vector->mask_addr);
163 }
164
165 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
166 {
167         napi_enable(&tqp_vector->napi);
168
169         /* enable vector */
170         hns3_mask_vector_irq(tqp_vector, 1);
171 }
172
173 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
174 {
175         /* disable vector */
176         hns3_mask_vector_irq(tqp_vector, 0);
177
178         disable_irq(tqp_vector->vector_irq);
179         napi_disable(&tqp_vector->napi);
180 }
181
182 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
183                                  u32 rl_value)
184 {
185         u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
186
187         /* This defines the configuration for RL (Interrupt Rate Limiter).
188          * RL defines the rate of interrupts, i.e. the number of interrupts
189          * per second. GL and RL are two ways to achieve interrupt coalescing.
190          */
191
192         if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
193             !tqp_vector->rx_group.coal.gl_adapt_enable)
194                 /* According to the hardware, the range of rl_reg is
195                  * 0-59 and the unit is 4.
196                  */
197                 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
198
199         writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
200 }
201
202 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
203                                     u32 gl_value)
204 {
205         u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
206
207         writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
208 }
209
210 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
211                                     u32 gl_value)
212 {
213         u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
214
215         writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
216 }
217
218 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
219                                    struct hns3_nic_priv *priv)
220 {
221         /* initialize the configuration for interrupt coalescing.
222          * 1. GL (Interrupt Gap Limiter)
223          * 2. RL (Interrupt Rate Limiter)
224          */
225
226         /* Default: enable interrupt coalescing self-adaptive and GL */
227         tqp_vector->tx_group.coal.gl_adapt_enable = 1;
228         tqp_vector->rx_group.coal.gl_adapt_enable = 1;
229
230         tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
231         tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
232
233         tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
234         tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
235 }
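
/* Minimal sketch (not used by the driver, the function name is only
 * illustrative): a fixed RX GL value can be pinned on a vector by turning
 * off self-adaptation and programming the register through the setter
 * defined above; the value is in usecs, as implied by hns3_gl_usec_to_reg().
 */
static inline void hns3_example_pin_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
                                          u32 gl_value)
{
        /* keep the dynamic algorithm from overwriting the fixed value */
        tqp_vector->rx_group.coal.gl_adapt_enable = 0;

        /* remember the value and write it to the vector's GL0 register */
        tqp_vector->rx_group.coal.int_gl = gl_value;
        hns3_set_vector_coalesce_rx_gl(tqp_vector, gl_value);
}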
236
237 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
238                                       struct hns3_nic_priv *priv)
239 {
240         struct hnae3_handle *h = priv->ae_handle;
241
242         hns3_set_vector_coalesce_tx_gl(tqp_vector,
243                                        tqp_vector->tx_group.coal.int_gl);
244         hns3_set_vector_coalesce_rx_gl(tqp_vector,
245                                        tqp_vector->rx_group.coal.int_gl);
246         hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
247 }
248
249 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
250 {
251         struct hnae3_handle *h = hns3_get_handle(netdev);
252         struct hnae3_knic_private_info *kinfo = &h->kinfo;
253         unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
254         int i, ret;
255
256         if (kinfo->num_tc <= 1) {
257                 netdev_reset_tc(netdev);
258         } else {
259                 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
260                 if (ret) {
261                         netdev_err(netdev,
262                                    "netdev_set_num_tc fail, ret=%d!\n", ret);
263                         return ret;
264                 }
265
266                 for (i = 0; i < HNAE3_MAX_TC; i++) {
267                         if (!kinfo->tc_info[i].enable)
268                                 continue;
269
270                         netdev_set_tc_queue(netdev,
271                                             kinfo->tc_info[i].tc,
272                                             kinfo->tc_info[i].tqp_count,
273                                             kinfo->tc_info[i].tqp_offset);
274                 }
275         }
276
277         ret = netif_set_real_num_tx_queues(netdev, queue_size);
278         if (ret) {
279                 netdev_err(netdev,
280                            "netif_set_real_num_tx_queues fail, ret=%d!\n",
281                            ret);
282                 return ret;
283         }
284
285         ret = netif_set_real_num_rx_queues(netdev, queue_size);
286         if (ret) {
287                 netdev_err(netdev,
288                            "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
289                 return ret;
290         }
291
292         return 0;
293 }
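
/* Worked example (illustrative): with rss_size = 16 and num_tc = 4 the
 * netdev exposes queue_size = 64 real TX/RX queues, and each enabled TC is
 * mapped to its block of tqp_count queues starting at tqp_offset, e.g. TC0
 * at offset 0 and TC1 at offset 16 for an even split.
 */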
294
295 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
296 {
297         u16 alloc_tqps, max_rss_size, rss_size;
298
299         h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
300         rss_size = alloc_tqps / h->kinfo.num_tc;
301
302         return min_t(u16, rss_size, max_rss_size);
303 }
304
305 static void hns3_tqp_enable(struct hnae3_queue *tqp)
306 {
307         u32 rcb_reg;
308
309         rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
310         rcb_reg |= BIT(HNS3_RING_EN_B);
311         hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
312 }
313
314 static void hns3_tqp_disable(struct hnae3_queue *tqp)
315 {
316         u32 rcb_reg;
317
318         rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
319         rcb_reg &= ~BIT(HNS3_RING_EN_B);
320         hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
321 }
322
323 static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
324 {
325 #ifdef CONFIG_RFS_ACCEL
326         free_irq_cpu_rmap(netdev->rx_cpu_rmap);
327         netdev->rx_cpu_rmap = NULL;
328 #endif
329 }
330
331 static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
332 {
333 #ifdef CONFIG_RFS_ACCEL
334         struct hns3_nic_priv *priv = netdev_priv(netdev);
335         struct hns3_enet_tqp_vector *tqp_vector;
336         int i, ret;
337
338         if (!netdev->rx_cpu_rmap) {
339                 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
340                 if (!netdev->rx_cpu_rmap)
341                         return -ENOMEM;
342         }
343
344         for (i = 0; i < priv->vector_num; i++) {
345                 tqp_vector = &priv->tqp_vector[i];
346                 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
347                                        tqp_vector->vector_irq);
348                 if (ret) {
349                         hns3_free_rx_cpu_rmap(netdev);
350                         return ret;
351                 }
352         }
353 #endif
354         return 0;
355 }
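
/* The rmap built above is what lets accelerated RFS (aRFS) steer a flow to
 * the CPU that owns the matching vector's interrupt; as noted at the call
 * site, the device still works without it, so a failure there only triggers
 * a warning.
 */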
356
357 static int hns3_nic_net_up(struct net_device *netdev)
358 {
359         struct hns3_nic_priv *priv = netdev_priv(netdev);
360         struct hnae3_handle *h = priv->ae_handle;
361         int i, j;
362         int ret;
363
364         ret = hns3_nic_reset_all_ring(h);
365         if (ret)
366                 return ret;
367
368         /* the device can work without cpu rmap, only aRFS needs it */
369         ret = hns3_set_rx_cpu_rmap(netdev);
370         if (ret)
371                 netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
372
373         /* get irq resource for all vectors */
374         ret = hns3_nic_init_irq(priv);
375         if (ret) {
376                 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
377                 goto free_rmap;
378         }
379
380         clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
381
382         /* enable the vectors */
383         for (i = 0; i < priv->vector_num; i++)
384                 hns3_vector_enable(&priv->tqp_vector[i]);
385
386         /* enable rcb */
387         for (j = 0; j < h->kinfo.num_tqps; j++)
388                 hns3_tqp_enable(h->kinfo.tqp[j]);
389
390         /* start the ae_dev */
391         ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
392         if (ret)
393                 goto out_start_err;
394
395         return 0;
396
397 out_start_err:
398         set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
399         while (j--)
400                 hns3_tqp_disable(h->kinfo.tqp[j]);
401
402         for (j = i - 1; j >= 0; j--)
403                 hns3_vector_disable(&priv->tqp_vector[j]);
404
405         hns3_nic_uninit_irq(priv);
406 free_rmap:
407         hns3_free_rx_cpu_rmap(netdev);
408         return ret;
409 }
410
411 static void hns3_config_xps(struct hns3_nic_priv *priv)
412 {
413         int i;
414
415         for (i = 0; i < priv->vector_num; i++) {
416                 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
417                 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
418
419                 while (ring) {
420                         int ret;
421
422                         ret = netif_set_xps_queue(priv->netdev,
423                                                   &tqp_vector->affinity_mask,
424                                                   ring->tqp->tqp_index);
425                         if (ret)
426                                 netdev_warn(priv->netdev,
427                                             "set xps queue failed: %d", ret);
428
429                         ring = ring->next;
430                 }
431         }
432 }
433
434 static int hns3_nic_net_open(struct net_device *netdev)
435 {
436         struct hns3_nic_priv *priv = netdev_priv(netdev);
437         struct hnae3_handle *h = hns3_get_handle(netdev);
438         struct hnae3_knic_private_info *kinfo;
439         int i, ret;
440
441         if (hns3_nic_resetting(netdev))
442                 return -EBUSY;
443
444         netif_carrier_off(netdev);
445
446         ret = hns3_nic_set_real_num_queue(netdev);
447         if (ret)
448                 return ret;
449
450         ret = hns3_nic_net_up(netdev);
451         if (ret) {
452                 netdev_err(netdev,
453                            "hns net up fail, ret=%d!\n", ret);
454                 return ret;
455         }
456
457         kinfo = &h->kinfo;
458         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
459                 netdev_set_prio_tc_map(netdev, i,
460                                        kinfo->prio_tc[i]);
461         }
462
463         if (h->ae_algo->ops->set_timer_task)
464                 h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
465
466         hns3_config_xps(priv);
467         return 0;
468 }
469
470 static void hns3_nic_net_down(struct net_device *netdev)
471 {
472         struct hns3_nic_priv *priv = netdev_priv(netdev);
473         struct hnae3_handle *h = hns3_get_handle(netdev);
474         const struct hnae3_ae_ops *ops;
475         int i;
476
477         /* disable vectors */
478         for (i = 0; i < priv->vector_num; i++)
479                 hns3_vector_disable(&priv->tqp_vector[i]);
480
481         /* disable rcb */
482         for (i = 0; i < h->kinfo.num_tqps; i++)
483                 hns3_tqp_disable(h->kinfo.tqp[i]);
484
485         /* stop ae_dev */
486         ops = priv->ae_handle->ae_algo->ops;
487         if (ops->stop)
488                 ops->stop(priv->ae_handle);
489
490         hns3_free_rx_cpu_rmap(netdev);
491
492         /* free irq resources */
493         hns3_nic_uninit_irq(priv);
494
495         hns3_clear_all_ring(priv->ae_handle);
496 }
497
498 static int hns3_nic_net_stop(struct net_device *netdev)
499 {
500         struct hns3_nic_priv *priv = netdev_priv(netdev);
501         struct hnae3_handle *h = hns3_get_handle(netdev);
502
503         if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
504                 return 0;
505
506         if (h->ae_algo->ops->set_timer_task)
507                 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
508
509         netif_tx_stop_all_queues(netdev);
510         netif_carrier_off(netdev);
511
512         hns3_nic_net_down(netdev);
513
514         return 0;
515 }
516
517 static int hns3_nic_uc_sync(struct net_device *netdev,
518                             const unsigned char *addr)
519 {
520         struct hnae3_handle *h = hns3_get_handle(netdev);
521
522         if (h->ae_algo->ops->add_uc_addr)
523                 return h->ae_algo->ops->add_uc_addr(h, addr);
524
525         return 0;
526 }
527
528 static int hns3_nic_uc_unsync(struct net_device *netdev,
529                               const unsigned char *addr)
530 {
531         struct hnae3_handle *h = hns3_get_handle(netdev);
532
533         if (h->ae_algo->ops->rm_uc_addr)
534                 return h->ae_algo->ops->rm_uc_addr(h, addr);
535
536         return 0;
537 }
538
539 static int hns3_nic_mc_sync(struct net_device *netdev,
540                             const unsigned char *addr)
541 {
542         struct hnae3_handle *h = hns3_get_handle(netdev);
543
544         if (h->ae_algo->ops->add_mc_addr)
545                 return h->ae_algo->ops->add_mc_addr(h, addr);
546
547         return 0;
548 }
549
550 static int hns3_nic_mc_unsync(struct net_device *netdev,
551                               const unsigned char *addr)
552 {
553         struct hnae3_handle *h = hns3_get_handle(netdev);
554
555         if (h->ae_algo->ops->rm_mc_addr)
556                 return h->ae_algo->ops->rm_mc_addr(h, addr);
557
558         return 0;
559 }
560
561 static u8 hns3_get_netdev_flags(struct net_device *netdev)
562 {
563         u8 flags = 0;
564
565         if (netdev->flags & IFF_PROMISC) {
566                 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
567         } else {
568                 flags |= HNAE3_VLAN_FLTR;
569                 if (netdev->flags & IFF_ALLMULTI)
570                         flags |= HNAE3_USER_MPE;
571         }
572
573         return flags;
574 }
575
576 static void hns3_nic_set_rx_mode(struct net_device *netdev)
577 {
578         struct hnae3_handle *h = hns3_get_handle(netdev);
579         u8 new_flags;
580         int ret;
581
582         new_flags = hns3_get_netdev_flags(netdev);
583
584         ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
585         if (ret) {
586                 netdev_err(netdev, "sync uc address fail\n");
587                 if (ret == -ENOSPC)
588                         new_flags |= HNAE3_OVERFLOW_UPE;
589         }
590
591         if (netdev->flags & IFF_MULTICAST) {
592                 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
593                                     hns3_nic_mc_unsync);
594                 if (ret) {
595                         netdev_err(netdev, "sync mc address fail\n");
596                         if (ret == -ENOSPC)
597                                 new_flags |= HNAE3_OVERFLOW_MPE;
598                 }
599         }
600
601         /* When user-mode promiscuous mode is enabled, vlan filtering is
602          * disabled to let all packets in. When promiscuous mode is enabled
603          * because the MAC-VLAN table overflowed, vlan filtering stays enabled.
604          */
605         hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
606         h->netdev_flags = new_flags;
607         hns3_update_promisc_mode(netdev, new_flags);
608 }
609
610 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
611 {
612         struct hns3_nic_priv *priv = netdev_priv(netdev);
613         struct hnae3_handle *h = priv->ae_handle;
614
615         if (h->ae_algo->ops->set_promisc_mode) {
616                 return h->ae_algo->ops->set_promisc_mode(h,
617                                                 promisc_flags & HNAE3_UPE,
618                                                 promisc_flags & HNAE3_MPE);
619         }
620
621         return 0;
622 }
623
624 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
625 {
626         struct hns3_nic_priv *priv = netdev_priv(netdev);
627         struct hnae3_handle *h = priv->ae_handle;
628         bool last_state;
629
630         if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
631                 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
632                 if (enable != last_state) {
633                         netdev_info(netdev,
634                                     "%s vlan filter\n",
635                                     enable ? "enable" : "disable");
636                         h->ae_algo->ops->enable_vlan_filter(h, enable);
637                 }
638         }
639 }
640
641 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
642                         u16 *mss, u32 *type_cs_vlan_tso)
643 {
644         u32 l4_offset, hdr_len;
645         union l3_hdr_info l3;
646         union l4_hdr_info l4;
647         u32 l4_paylen;
648         int ret;
649
650         if (!skb_is_gso(skb))
651                 return 0;
652
653         ret = skb_cow_head(skb, 0);
654         if (unlikely(ret))
655                 return ret;
656
657         l3.hdr = skb_network_header(skb);
658         l4.hdr = skb_transport_header(skb);
659
660         /* Software should clear the IPv4's checksum field when tso is
661          * needed.
662          */
663         if (l3.v4->version == 4)
664                 l3.v4->check = 0;
665
666         /* tunnel packet.*/
667         if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
668                                          SKB_GSO_GRE_CSUM |
669                                          SKB_GSO_UDP_TUNNEL |
670                                          SKB_GSO_UDP_TUNNEL_CSUM)) {
671                 if ((!(skb_shinfo(skb)->gso_type &
672                     SKB_GSO_PARTIAL)) &&
673                     (skb_shinfo(skb)->gso_type &
674                     SKB_GSO_UDP_TUNNEL_CSUM)) {
675                         /* Software should clear the udp's checksum
676                          * field when tso is needed.
677                          */
678                         l4.udp->check = 0;
679                 }
680                 /* reset l3&l4 pointers from outer to inner headers */
681                 l3.hdr = skb_inner_network_header(skb);
682                 l4.hdr = skb_inner_transport_header(skb);
683
684                 /* Software should clear the IPv4's checksum field when
685                  * tso is needed.
686                  */
687                 if (l3.v4->version == 4)
688                         l3.v4->check = 0;
689         }
690
691         /* normal or tunnel packet*/
692         l4_offset = l4.hdr - skb->data;
693         hdr_len = (l4.tcp->doff << 2) + l4_offset;
694
695         /* remove payload length from inner pseudo checksum when tso*/
696         l4_paylen = skb->len - l4_offset;
697         csum_replace_by_diff(&l4.tcp->check,
698                              (__force __wsum)htonl(l4_paylen));
699
700         /* find the txbd field values */
701         *paylen = skb->len - hdr_len;
702         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
703
704         /* get MSS for TSO */
705         *mss = skb_shinfo(skb)->gso_size;
706
707         return 0;
708 }
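
/* Worked example (illustrative) for the TSO fields above: for an IPv4/TCP
 * skb with skb->len = 65000, a 14-byte Ethernet header, a 20-byte IP header
 * and a 20-byte TCP header, l4_offset = 34 and hdr_len = 54, so
 * paylen = 64946 and l4_paylen = 64966. csum_replace_by_diff() removes the
 * full l4_paylen from the TCP pseudo-header checksum so that the hardware
 * can add back the correct per-segment length for every MSS-sized segment
 * it emits.
 */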
709
710 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
711                                 u8 *il4_proto)
712 {
713         union l3_hdr_info l3;
714         unsigned char *l4_hdr;
715         unsigned char *exthdr;
716         u8 l4_proto_tmp;
717         __be16 frag_off;
718
719         /* find the outer header pointers */
720         l3.hdr = skb_network_header(skb);
721         l4_hdr = skb_transport_header(skb);
722
723         if (skb->protocol == htons(ETH_P_IPV6)) {
724                 exthdr = l3.hdr + sizeof(*l3.v6);
725                 l4_proto_tmp = l3.v6->nexthdr;
726                 if (l4_hdr != exthdr)
727                         ipv6_skip_exthdr(skb, exthdr - skb->data,
728                                          &l4_proto_tmp, &frag_off);
729         } else if (skb->protocol == htons(ETH_P_IP)) {
730                 l4_proto_tmp = l3.v4->protocol;
731         } else {
732                 return -EINVAL;
733         }
734
735         *ol4_proto = l4_proto_tmp;
736
737         /* non-tunnel packet: there is no inner l4 protocol */
738         if (!skb->encapsulation) {
739                 *il4_proto = 0;
740                 return 0;
741         }
742
743         /* find the inner header pointers */
744         l3.hdr = skb_inner_network_header(skb);
745         l4_hdr = skb_inner_transport_header(skb);
746
747         if (l3.v6->version == 6) {
748                 exthdr = l3.hdr + sizeof(*l3.v6);
749                 l4_proto_tmp = l3.v6->nexthdr;
750                 if (l4_hdr != exthdr)
751                         ipv6_skip_exthdr(skb, exthdr - skb->data,
752                                          &l4_proto_tmp, &frag_off);
753         } else if (l3.v4->version == 4) {
754                 l4_proto_tmp = l3.v4->protocol;
755         }
756
757         *il4_proto = l4_proto_tmp;
758
759         return 0;
760 }
761
762 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
763  * packet is UDP with a destination port matching the IANA-assigned VXLAN
764  * port (4789), the hardware is expected to do the checksum offload, but it
765  * does not offload the checksum for that port. Fall back to software
766  * checksumming in that case.
767  */
768 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
769 {
770         union l4_hdr_info l4;
771
772         l4.hdr = skb_transport_header(skb);
773
774         if (!(!skb->encapsulation &&
775               l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
776                 return false;
777
778         skb_checksum_help(skb);
779
780         return true;
781 }
782
783 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
784                                   u32 *ol_type_vlan_len_msec)
785 {
786         u32 l2_len, l3_len, l4_len;
787         unsigned char *il2_hdr;
788         union l3_hdr_info l3;
789         union l4_hdr_info l4;
790
791         l3.hdr = skb_network_header(skb);
792         l4.hdr = skb_transport_header(skb);
793
794         /* compute OL2 header size, in units of 2 bytes */
795         l2_len = l3.hdr - skb->data;
796         hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
797
798         /* compute OL3 header size, in units of 4 bytes */
799         l3_len = l4.hdr - l3.hdr;
800         hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
801
802         il2_hdr = skb_inner_mac_header(skb);
803         /* compute OL4 header size, in units of 4 bytes */
804         l4_len = il2_hdr - l4.hdr;
805         hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
806
807         /* define outer network header type */
808         if (skb->protocol == htons(ETH_P_IP)) {
809                 if (skb_is_gso(skb))
810                         hns3_set_field(*ol_type_vlan_len_msec,
811                                        HNS3_TXD_OL3T_S,
812                                        HNS3_OL3T_IPV4_CSUM);
813                 else
814                         hns3_set_field(*ol_type_vlan_len_msec,
815                                        HNS3_TXD_OL3T_S,
816                                        HNS3_OL3T_IPV4_NO_CSUM);
817
818         } else if (skb->protocol == htons(ETH_P_IPV6)) {
819                 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
820                                HNS3_OL3T_IPV6);
821         }
822
823         if (ol4_proto == IPPROTO_UDP)
824                 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
825                                HNS3_TUN_MAC_IN_UDP);
826         else if (ol4_proto == IPPROTO_GRE)
827                 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
828                                HNS3_TUN_NVGRE);
829 }
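
/* Worked example (illustrative) of the unit conversions above: for a VXLAN
 * packet with a 14-byte outer Ethernet header, a 20-byte outer IPv4 header
 * and 8 bytes of UDP plus 8 bytes of VXLAN header before the inner MAC
 * header, the descriptor fields become l2_len = 14 >> 1 = 7,
 * l3_len = 20 >> 2 = 5 and l4_len = 16 >> 2 = 4.
 */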
830
831 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
832                            u8 il4_proto, u32 *type_cs_vlan_tso,
833                            u32 *ol_type_vlan_len_msec)
834 {
835         unsigned char *l2_hdr = skb->data;
836         u32 l4_proto = ol4_proto;
837         union l4_hdr_info l4;
838         union l3_hdr_info l3;
839         u32 l2_len, l3_len;
840
841         l4.hdr = skb_transport_header(skb);
842         l3.hdr = skb_network_header(skb);
843
844         /* handle encapsulation skb */
845         if (skb->encapsulation) {
846                 /* If this is not a UDP/GRE encapsulation skb */
847                 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
848                         /* drop the tunnel packet if the hardware doesn't
849                          * support it: it can't compute the csum when TSO.
850                          */
851                         if (skb_is_gso(skb))
852                                 return -EDOM;
853
854                         /* the stack has already computed the IP checksum;
855                          * the driver computes the l4 checksum when not TSO.
856                          */
857                         skb_checksum_help(skb);
858                         return 0;
859                 }
860
861                 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
862
863                 /* switch to inner header */
864                 l2_hdr = skb_inner_mac_header(skb);
865                 l3.hdr = skb_inner_network_header(skb);
866                 l4.hdr = skb_inner_transport_header(skb);
867                 l4_proto = il4_proto;
868         }
869
870         if (l3.v4->version == 4) {
871                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
872                                HNS3_L3T_IPV4);
873
874                 /* the stack computes the IP header already, the only time we
875                  * need the hardware to recompute it is in the case of TSO.
876                  */
877                 if (skb_is_gso(skb))
878                         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
879         } else if (l3.v6->version == 6) {
880                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
881                                HNS3_L3T_IPV6);
882         }
883
884         /* compute inner(/normal) L2 header size, in units of 2 bytes */
885         l2_len = l3.hdr - l2_hdr;
886         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
887
888         /* compute inner(/normal) L3 header size, in units of 4 bytes */
889         l3_len = l4.hdr - l3.hdr;
890         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
891
892         /* compute inner(/normal) L4 header size, in units of 4 bytes */
893         switch (l4_proto) {
894         case IPPROTO_TCP:
895                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
896                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
897                                HNS3_L4T_TCP);
898                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
899                                l4.tcp->doff);
900                 break;
901         case IPPROTO_UDP:
902                 if (hns3_tunnel_csum_bug(skb))
903                         break;
904
905                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
906                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
907                                HNS3_L4T_UDP);
908                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
909                                (sizeof(struct udphdr) >> 2));
910                 break;
911         case IPPROTO_SCTP:
912                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
913                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
914                                HNS3_L4T_SCTP);
915                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
916                                (sizeof(struct sctphdr) >> 2));
917                 break;
918         default:
919                 /* drop the tunnel packet if the hardware doesn't support
920                  * it: it can't compute the csum when TSO.
921                  */
922                 if (skb_is_gso(skb))
923                         return -EDOM;
924
925                 /* the stack has already computed the IP checksum;
926                  * the driver computes the l4 checksum when not TSO.
927                  */
928                 skb_checksum_help(skb);
929                 return 0;
930         }
931
932         return 0;
933 }
934
935 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
936 {
937         /* Config bd buffer end */
938         hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
939         hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
940 }
941
942 static int hns3_fill_desc_vtags(struct sk_buff *skb,
943                                 struct hns3_enet_ring *tx_ring,
944                                 u32 *inner_vlan_flag,
945                                 u32 *out_vlan_flag,
946                                 u16 *inner_vtag,
947                                 u16 *out_vtag)
948 {
949 #define HNS3_TX_VLAN_PRIO_SHIFT 13
950
951         struct hnae3_handle *handle = tx_ring->tqp->handle;
952
953         /* Due to a HW limitation, when port based VLAN insertion is enabled,
954          * only one VLAN header is allowed in the skb or a RAS error is raised.
955          */
956         if (unlikely(skb_vlan_tagged_multi(skb) &&
957                      handle->port_base_vlan_state ==
958                      HNAE3_PORT_BASE_VLAN_ENABLE))
959                 return -EINVAL;
960
961         if (skb->protocol == htons(ETH_P_8021Q) &&
962             !(tx_ring->tqp->handle->kinfo.netdev->features &
963             NETIF_F_HW_VLAN_CTAG_TX)) {
964                 /* When HW VLAN acceleration is turned off, and the stack
965                  * sets the protocol to 802.1q, the driver just needs to
966                  * set the protocol to the encapsulated ethertype.
967                  */
968                 skb->protocol = vlan_get_protocol(skb);
969                 return 0;
970         }
971
972         if (skb_vlan_tag_present(skb)) {
973                 u16 vlan_tag;
974
975                 vlan_tag = skb_vlan_tag_get(skb);
976                 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
977
978                 /* Per the HW strategy, use out_vtag in the two-layer tag
979                  * case and inner_vtag in the single tag case.
980                  */
981                 if (skb->protocol == htons(ETH_P_8021Q)) {
982                         if (handle->port_base_vlan_state ==
983                             HNAE3_PORT_BASE_VLAN_DISABLE) {
984                                 hns3_set_field(*out_vlan_flag,
985                                                HNS3_TXD_OVLAN_B, 1);
986                                 *out_vtag = vlan_tag;
987                         } else {
988                                 hns3_set_field(*inner_vlan_flag,
989                                                HNS3_TXD_VLAN_B, 1);
990                                 *inner_vtag = vlan_tag;
991                         }
992                 } else {
993                         hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
994                         *inner_vtag = vlan_tag;
995                 }
996         } else if (skb->protocol == htons(ETH_P_8021Q)) {
997                 struct vlan_ethhdr *vhdr;
998                 int rc;
999
1000                 rc = skb_cow_head(skb, 0);
1001                 if (unlikely(rc < 0))
1002                         return rc;
1003                 vhdr = (struct vlan_ethhdr *)skb->data;
1004                 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
1005                                         << HNS3_TX_VLAN_PRIO_SHIFT);
1006         }
1007
1008         skb->protocol = vlan_get_protocol(skb);
1009         return 0;
1010 }
1011
1012 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1013                           int size, int frag_end, enum hns_desc_type type)
1014 {
1015         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1016         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1017         struct device *dev = ring_to_dev(ring);
1018         struct skb_frag_struct *frag;
1019         unsigned int frag_buf_num;
1020         int k, sizeoflast;
1021         dma_addr_t dma;
1022
1023         if (type == DESC_TYPE_SKB) {
1024                 struct sk_buff *skb = (struct sk_buff *)priv;
1025                 u32 ol_type_vlan_len_msec = 0;
1026                 u32 type_cs_vlan_tso = 0;
1027                 u32 paylen = skb->len;
1028                 u16 inner_vtag = 0;
1029                 u16 out_vtag = 0;
1030                 u16 mss = 0;
1031                 int ret;
1032
1033                 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
1034                                            &ol_type_vlan_len_msec,
1035                                            &inner_vtag, &out_vtag);
1036                 if (unlikely(ret))
1037                         return ret;
1038
1039                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1040                         u8 ol4_proto, il4_proto;
1041
1042                         skb_reset_mac_len(skb);
1043
1044                         ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1045                         if (unlikely(ret))
1046                                 return ret;
1047
1048                         ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1049                                               &type_cs_vlan_tso,
1050                                               &ol_type_vlan_len_msec);
1051                         if (unlikely(ret))
1052                                 return ret;
1053
1054                         ret = hns3_set_tso(skb, &paylen, &mss,
1055                                            &type_cs_vlan_tso);
1056                         if (unlikely(ret))
1057                                 return ret;
1058                 }
1059
1060                 /* Set txbd */
1061                 desc->tx.ol_type_vlan_len_msec =
1062                         cpu_to_le32(ol_type_vlan_len_msec);
1063                 desc->tx.type_cs_vlan_tso_len =
1064                         cpu_to_le32(type_cs_vlan_tso);
1065                 desc->tx.paylen = cpu_to_le32(paylen);
1066                 desc->tx.mss = cpu_to_le16(mss);
1067                 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1068                 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1069
1070                 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1071         } else {
1072                 frag = (struct skb_frag_struct *)priv;
1073                 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1074         }
1075
1076         if (unlikely(dma_mapping_error(dev, dma))) {
1077                 ring->stats.sw_err_cnt++;
1078                 return -ENOMEM;
1079         }
1080
1081         desc_cb->length = size;
1082
1083         if (likely(size <= HNS3_MAX_BD_SIZE)) {
1084                 u16 bdtp_fe_sc_vld_ra_ri = 0;
1085
1086                 desc_cb->priv = priv;
1087                 desc_cb->dma = dma;
1088                 desc_cb->type = type;
1089                 desc->addr = cpu_to_le64(dma);
1090                 desc->tx.send_size = cpu_to_le16(size);
1091                 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
1092                 desc->tx.bdtp_fe_sc_vld_ra_ri =
1093                         cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1094
1095                 ring_ptr_move_fw(ring, next_to_use);
1096                 return 0;
1097         }
1098
1099         frag_buf_num = hns3_tx_bd_count(size);
1100         sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1101         sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1102
1103         /* When frag size is bigger than hardware limit, split this frag */
1104         for (k = 0; k < frag_buf_num; k++) {
1105                 u16 bdtp_fe_sc_vld_ra_ri = 0;
1106
1107                 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1108                 desc_cb->priv = priv;
1109                 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1110                 desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1111                                         DESC_TYPE_SKB : DESC_TYPE_PAGE;
1112
1113                 /* now, fill the descriptor */
1114                 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1115                 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1116                                 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1117                 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1118                                        frag_end && (k == frag_buf_num - 1) ?
1119                                                 1 : 0);
1120                 desc->tx.bdtp_fe_sc_vld_ra_ri =
1121                                 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1122
1123                 /* move ring pointer to next.*/
1124                 ring_ptr_move_fw(ring, next_to_use);
1125
1126                 desc_cb = &ring->desc_cb[ring->next_to_use];
1127                 desc = &ring->desc[ring->next_to_use];
1128         }
1129
1130         return 0;
1131 }
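
/* Note on the split loop above: a buffer larger than HNS3_MAX_BD_SIZE is
 * spread over frag_buf_num descriptors that all point into the same DMA
 * mapping at offsets of k * HNS3_MAX_BD_SIZE. Only the first BD of the skb
 * head keeps DESC_TYPE_SKB, so the completion path frees the skb exactly
 * once, and only the last BD of the last fragment carries the FE bit.
 */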
1132
1133 static int hns3_nic_bd_num(struct sk_buff *skb)
1134 {
1135         int size = skb_headlen(skb);
1136         int i, bd_num;
1137
1138         /* if the total len is within the max bd limit */
1139         if (likely(skb->len <= HNS3_MAX_BD_SIZE))
1140                 return skb_shinfo(skb)->nr_frags + 1;
1141
1142         bd_num = hns3_tx_bd_count(size);
1143
1144         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1145                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1146                 int frag_bd_num;
1147
1148                 size = skb_frag_size(frag);
1149                 frag_bd_num = hns3_tx_bd_count(size);
1150
1151                 if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
1152                         return -ENOMEM;
1153
1154                 bd_num += frag_bd_num;
1155         }
1156
1157         return bd_num;
1158 }
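
/* Worked example (illustrative, assuming HNS3_MAX_BD_SIZE is 65535 and
 * HNS3_MAX_BD_PER_FRAG is 8): a 3000-byte skb with two frags takes the fast
 * path and needs nr_frags + 1 = 3 BDs, while a single (hypothetical)
 * 600000-byte frag would need 10 BDs, exceed HNS3_MAX_BD_PER_FRAG and make
 * this function return -ENOMEM, which the xmit path counts as sw_err_cnt
 * and drops.
 */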
1159
1160 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1161 {
1162         if (!skb->encapsulation)
1163                 return skb_transport_offset(skb) + tcp_hdrlen(skb);
1164
1165         return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
1166 }
1167
1168 /* HW needs every 8 continuous buffers to hold more data than the MSS.
1169  * We simplify this by ensuring that skb_headlen plus the first 7 continuous
1170  * frags exceed the GSO header length plus the MSS, and that every following
1171  * window of 7 continuous frags exceeds the MSS, except the last 7 frags.
1172  */
1173 static bool hns3_skb_need_linearized(struct sk_buff *skb)
1174 {
1175         int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
1176         unsigned int tot_len = 0;
1177         int i;
1178
1179         for (i = 0; i < bd_limit; i++)
1180                 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1181
1182         /* ensure that headlen plus the first 7 frags exceeds mss + header
1183          * len, and that the first 7 frags alone exceed the mss.
1184          */
1185         if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
1186             hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
1187                 return true;
1188
1189         /* ensure each remaining window of 7 continuous buffers exceeds the mss */
1190         for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
1191                 tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
1192                 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
1193
1194                 if (tot_len < skb_shinfo(skb)->gso_size)
1195                         return true;
1196         }
1197
1198         return false;
1199 }
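
/* Worked example (illustrative): with gso_size = 1448, a 54-byte GSO header,
 * a 200-byte linear head and seven 100-byte frags at the front of the skb,
 * tot_len + headlen = 900 is smaller than 1448 + 54, so some window of 8
 * consecutive BDs would carry less than one MSS and the skb has to be
 * linearized before it is handed to the hardware.
 */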
1200
1201 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1202                                   struct sk_buff **out_skb)
1203 {
1204         struct sk_buff *skb = *out_skb;
1205         int bd_num;
1206
1207         bd_num = hns3_nic_bd_num(skb);
1208         if (bd_num < 0)
1209                 return bd_num;
1210
1211         if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
1212                 struct sk_buff *new_skb;
1213
1214                 if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
1215                         goto out;
1216
1217                 bd_num = hns3_tx_bd_count(skb->len);
1218                 if (unlikely(ring_space(ring) < bd_num))
1219                         return -EBUSY;
1220                 /* manually linearize the packet by copying it */
1221                 new_skb = skb_copy(skb, GFP_ATOMIC);
1222                 if (!new_skb)
1223                         return -ENOMEM;
1224                 dev_kfree_skb_any(skb);
1225                 *out_skb = new_skb;
1226
1227                 u64_stats_update_begin(&ring->syncp);
1228                 ring->stats.tx_copy++;
1229                 u64_stats_update_end(&ring->syncp);
1230         }
1231
1232 out:
1233         if (unlikely(ring_space(ring) < bd_num))
1234                 return -EBUSY;
1235
1236         return bd_num;
1237 }
1238
1239 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1240 {
1241         struct device *dev = ring_to_dev(ring);
1242         unsigned int i;
1243
1244         for (i = 0; i < ring->desc_num; i++) {
1245                 /* check if this is where we started */
1246                 if (ring->next_to_use == next_to_use_orig)
1247                         break;
1248
1249                 /* rollback one */
1250                 ring_ptr_move_bw(ring, next_to_use);
1251
1252                 /* unmap the descriptor dma address */
1253                 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1254                         dma_unmap_single(dev,
1255                                          ring->desc_cb[ring->next_to_use].dma,
1256                                         ring->desc_cb[ring->next_to_use].length,
1257                                         DMA_TO_DEVICE);
1258                 else if (ring->desc_cb[ring->next_to_use].length)
1259                         dma_unmap_page(dev,
1260                                        ring->desc_cb[ring->next_to_use].dma,
1261                                        ring->desc_cb[ring->next_to_use].length,
1262                                        DMA_TO_DEVICE);
1263
1264                 ring->desc_cb[ring->next_to_use].length = 0;
1265                 ring->desc_cb[ring->next_to_use].dma = 0;
1266         }
1267 }
1268
1269 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1270 {
1271         struct hns3_nic_priv *priv = netdev_priv(netdev);
1272         struct hns3_nic_ring_data *ring_data =
1273                 &tx_ring_data(priv, skb->queue_mapping);
1274         struct hns3_enet_ring *ring = ring_data->ring;
1275         struct netdev_queue *dev_queue;
1276         struct skb_frag_struct *frag;
1277         int next_to_use_head;
1278         int buf_num;
1279         int seg_num;
1280         int size;
1281         int ret;
1282         int i;
1283
1284         /* Prefetch the data used later */
1285         prefetch(skb->data);
1286
1287         buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
1288         if (unlikely(buf_num <= 0)) {
1289                 if (buf_num == -EBUSY) {
1290                         u64_stats_update_begin(&ring->syncp);
1291                         ring->stats.tx_busy++;
1292                         u64_stats_update_end(&ring->syncp);
1293                         goto out_net_tx_busy;
1294                 } else if (buf_num == -ENOMEM) {
1295                         u64_stats_update_begin(&ring->syncp);
1296                         ring->stats.sw_err_cnt++;
1297                         u64_stats_update_end(&ring->syncp);
1298                 }
1299
1300                 if (net_ratelimit())
1301                         netdev_err(netdev, "xmit error: %d!\n", buf_num);
1302
1303                 goto out_err_tx_ok;
1304         }
1305
1306         /* No. of segments (plus a header) */
1307         seg_num = skb_shinfo(skb)->nr_frags + 1;
1308         /* Fill the first part */
1309         size = skb_headlen(skb);
1310
1311         next_to_use_head = ring->next_to_use;
1312
1313         ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1314                              DESC_TYPE_SKB);
1315         if (unlikely(ret))
1316                 goto fill_err;
1317
1318         /* Fill the fragments */
1319         for (i = 1; i < seg_num; i++) {
1320                 frag = &skb_shinfo(skb)->frags[i - 1];
1321                 size = skb_frag_size(frag);
1322
1323                 ret = hns3_fill_desc(ring, frag, size,
1324                                      seg_num - 1 == i ? 1 : 0,
1325                                      DESC_TYPE_PAGE);
1326
1327                 if (unlikely(ret))
1328                         goto fill_err;
1329         }
1330
1331         /* Finished translating the packet into BDs */
1332         dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1333         netdev_tx_sent_queue(dev_queue, skb->len);
1334
1335         wmb(); /* Commit all data before submit */
1336
1337         hnae3_queue_xmit(ring->tqp, buf_num);
1338
1339         return NETDEV_TX_OK;
1340
1341 fill_err:
1342         hns3_clear_desc(ring, next_to_use_head);
1343
1344 out_err_tx_ok:
1345         dev_kfree_skb_any(skb);
1346         return NETDEV_TX_OK;
1347
1348 out_net_tx_busy:
1349         netif_stop_subqueue(netdev, ring_data->queue_index);
1350         smp_mb(); /* Commit all data before submit */
1351
1352         return NETDEV_TX_BUSY;
1353 }
1354
1355 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1356 {
1357         struct hnae3_handle *h = hns3_get_handle(netdev);
1358         struct sockaddr *mac_addr = p;
1359         int ret;
1360
1361         if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1362                 return -EADDRNOTAVAIL;
1363
1364         if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1365                 netdev_info(netdev, "already using mac address %pM\n",
1366                             mac_addr->sa_data);
1367                 return 0;
1368         }
1369
1370         ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1371         if (ret) {
1372                 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1373                 return ret;
1374         }
1375
1376         ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1377
1378         return 0;
1379 }
1380
1381 static int hns3_nic_do_ioctl(struct net_device *netdev,
1382                              struct ifreq *ifr, int cmd)
1383 {
1384         struct hnae3_handle *h = hns3_get_handle(netdev);
1385
1386         if (!netif_running(netdev))
1387                 return -EINVAL;
1388
1389         if (!h->ae_algo->ops->do_ioctl)
1390                 return -EOPNOTSUPP;
1391
1392         return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1393 }
1394
1395 static int hns3_nic_set_features(struct net_device *netdev,
1396                                  netdev_features_t features)
1397 {
1398         netdev_features_t changed = netdev->features ^ features;
1399         struct hns3_nic_priv *priv = netdev_priv(netdev);
1400         struct hnae3_handle *h = priv->ae_handle;
1401         bool enable;
1402         int ret;
1403
1404         if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
1405                 enable = !!(features & NETIF_F_GRO_HW);
1406                 ret = h->ae_algo->ops->set_gro_en(h, enable);
1407                 if (ret)
1408                         return ret;
1409         }
1410
1411         if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1412             h->ae_algo->ops->enable_vlan_filter) {
1413                 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
1414                 h->ae_algo->ops->enable_vlan_filter(h, enable);
1415         }
1416
1417         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1418             h->ae_algo->ops->enable_hw_strip_rxvtag) {
1419                 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1420                 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
1421                 if (ret)
1422                         return ret;
1423         }
1424
1425         if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1426                 enable = !!(features & NETIF_F_NTUPLE);
1427                 h->ae_algo->ops->enable_fd(h, enable);
1428         }
1429
1430         netdev->features = features;
1431         return 0;
1432 }
1433
1434 static void hns3_nic_get_stats64(struct net_device *netdev,
1435                                  struct rtnl_link_stats64 *stats)
1436 {
1437         struct hns3_nic_priv *priv = netdev_priv(netdev);
1438         int queue_num = priv->ae_handle->kinfo.num_tqps;
1439         struct hnae3_handle *handle = priv->ae_handle;
1440         struct hns3_enet_ring *ring;
1441         u64 rx_length_errors = 0;
1442         u64 rx_crc_errors = 0;
1443         u64 rx_multicast = 0;
1444         unsigned int start;
1445         u64 tx_errors = 0;
1446         u64 rx_errors = 0;
1447         unsigned int idx;
1448         u64 tx_bytes = 0;
1449         u64 rx_bytes = 0;
1450         u64 tx_pkts = 0;
1451         u64 rx_pkts = 0;
1452         u64 tx_drop = 0;
1453         u64 rx_drop = 0;
1454
1455         if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1456                 return;
1457
1458         handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1459
1460         for (idx = 0; idx < queue_num; idx++) {
1461                 /* fetch the tx stats */
1462                 ring = priv->ring_data[idx].ring;
1463                 do {
1464                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1465                         tx_bytes += ring->stats.tx_bytes;
1466                         tx_pkts += ring->stats.tx_pkts;
1467                         tx_drop += ring->stats.sw_err_cnt;
1468                         tx_errors += ring->stats.sw_err_cnt;
1469                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1470
1471                 /* fetch the rx stats */
1472                 ring = priv->ring_data[idx + queue_num].ring;
1473                 do {
1474                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1475                         rx_bytes += ring->stats.rx_bytes;
1476                         rx_pkts += ring->stats.rx_pkts;
1477                         rx_drop += ring->stats.non_vld_descs;
1478                         rx_drop += ring->stats.l2_err;
1479                         rx_errors += ring->stats.non_vld_descs;
1480                         rx_errors += ring->stats.l2_err;
1481                         rx_crc_errors += ring->stats.l2_err;
1482                         rx_crc_errors += ring->stats.l3l4_csum_err;
1483                         rx_multicast += ring->stats.rx_multicast;
1484                         rx_length_errors += ring->stats.err_pkt_len;
1485                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1486         }
1487
1488         stats->tx_bytes = tx_bytes;
1489         stats->tx_packets = tx_pkts;
1490         stats->rx_bytes = rx_bytes;
1491         stats->rx_packets = rx_pkts;
1492
1493         stats->rx_errors = rx_errors;
1494         stats->multicast = rx_multicast;
1495         stats->rx_length_errors = rx_length_errors;
1496         stats->rx_crc_errors = rx_crc_errors;
1497         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1498
1499         stats->tx_errors = tx_errors;
1500         stats->rx_dropped = rx_drop;
1501         stats->tx_dropped = tx_drop;
1502         stats->collisions = netdev->stats.collisions;
1503         stats->rx_over_errors = netdev->stats.rx_over_errors;
1504         stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1505         stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1506         stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1507         stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1508         stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1509         stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1510         stats->tx_window_errors = netdev->stats.tx_window_errors;
1511         stats->rx_compressed = netdev->stats.rx_compressed;
1512         stats->tx_compressed = netdev->stats.tx_compressed;
1513 }
1514
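/* Handle an mqprio offload request: only channel-mode hardware offload
 * (or a request that clears the TC configuration) is accepted, and the
 * actual TC setup is delegated to the DCB ops of the handle.
 */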
1515 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1516 {
1517         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1518         struct hnae3_handle *h = hns3_get_handle(netdev);
1519         struct hnae3_knic_private_info *kinfo = &h->kinfo;
1520         u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1521         u8 tc = mqprio_qopt->qopt.num_tc;
1522         u16 mode = mqprio_qopt->mode;
1523         u8 hw = mqprio_qopt->qopt.hw;
1524
1525         if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1526                mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1527                 return -EOPNOTSUPP;
1528
1529         if (tc > HNAE3_MAX_TC)
1530                 return -EINVAL;
1531
1532         if (!netdev)
1533                 return -EINVAL;
1534
1535         return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1536                 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1537 }
1538
1539 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1540                              void *type_data)
1541 {
1542         if (type != TC_SETUP_QDISC_MQPRIO)
1543                 return -EOPNOTSUPP;
1544
1545         return hns3_setup_tc(dev, type_data);
1546 }
1547
1548 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1549                                 __be16 proto, u16 vid)
1550 {
1551         struct hnae3_handle *h = hns3_get_handle(netdev);
1552         int ret = -EIO;
1553
1554         if (h->ae_algo->ops->set_vlan_filter)
1555                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1556
1557         return ret;
1558 }
1559
1560 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1561                                  __be16 proto, u16 vid)
1562 {
1563         struct hnae3_handle *h = hns3_get_handle(netdev);
1564         int ret = -EIO;
1565
1566         if (h->ae_algo->ops->set_vlan_filter)
1567                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1568
1569         return ret;
1570 }
1571
1572 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1573                                 u8 qos, __be16 vlan_proto)
1574 {
1575         struct hnae3_handle *h = hns3_get_handle(netdev);
1576         int ret = -EIO;
1577
1578         if (h->ae_algo->ops->set_vf_vlan_filter)
1579                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1580                                                    qos, vlan_proto);
1581
1582         return ret;
1583 }
1584
1585 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1586 {
1587         struct hnae3_handle *h = hns3_get_handle(netdev);
1588         int ret;
1589
1590         if (hns3_nic_resetting(netdev))
1591                 return -EBUSY;
1592
1593         if (!h->ae_algo->ops->set_mtu)
1594                 return -EOPNOTSUPP;
1595
1596         ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1597         if (ret)
1598                 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1599                            ret);
1600         else
1601                 netdev->mtu = new_mtu;
1602
1603         return ret;
1604 }
1605
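/* Locate the stopped TX queue that triggered the watchdog and dump its
 * software and hardware ring state for debugging. Returns false when no
 * timed-out queue is found, so the caller can skip the reset request.
 */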
1606 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1607 {
1608         struct hns3_nic_priv *priv = netdev_priv(ndev);
1609         struct hnae3_handle *h = hns3_get_handle(ndev);
1610         struct hns3_enet_ring *tx_ring = NULL;
1611         struct napi_struct *napi;
1612         int timeout_queue = 0;
1613         int hw_head, hw_tail;
1614         int fbd_num, fbd_oft;
1615         int ebd_num, ebd_oft;
1616         int bd_num, bd_err;
1617         int ring_en, tc;
1618         int i;
1619
1620         /* Find the stopped queue the same way the stack does */
1621         for (i = 0; i < ndev->num_tx_queues; i++) {
1622                 struct netdev_queue *q;
1623                 unsigned long trans_start;
1624
1625                 q = netdev_get_tx_queue(ndev, i);
1626                 trans_start = q->trans_start;
1627                 if (netif_xmit_stopped(q) &&
1628                     time_after(jiffies,
1629                                (trans_start + ndev->watchdog_timeo))) {
1630                         timeout_queue = i;
1631                         break;
1632                 }
1633         }
1634
1635         if (i == ndev->num_tx_queues) {
1636                 netdev_info(ndev,
1637                             "no netdev TX timeout queue found, timeout count: %llu\n",
1638                             priv->tx_timeout_count);
1639                 return false;
1640         }
1641
1642         priv->tx_timeout_count++;
1643
1644         tx_ring = priv->ring_data[timeout_queue].ring;
1645         napi = &tx_ring->tqp_vector->napi;
1646
1647         netdev_info(ndev,
1648                     "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
1649                     priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
1650                     tx_ring->next_to_clean, napi->state);
1651
1652         netdev_info(ndev,
1653                     "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
1654                     tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
1655                     tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
1656
1657         netdev_info(ndev,
1658                     "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
1659                     tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
1660                     tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
1661
1662         /* When the MAC receives many pause frames continuously, it is unable
1663          * to send packets, which may cause a TX timeout
1664          */
1665         if (h->ae_algo->ops->update_stats &&
1666             h->ae_algo->ops->get_mac_pause_stats) {
1667                 u64 tx_pause_cnt, rx_pause_cnt;
1668
1669                 h->ae_algo->ops->update_stats(h, &ndev->stats);
1670                 h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
1671                                                      &rx_pause_cnt);
1672                 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
1673                             tx_pause_cnt, rx_pause_cnt);
1674         }
1675
1676         hw_head = readl_relaxed(tx_ring->tqp->io_base +
1677                                 HNS3_RING_TX_RING_HEAD_REG);
1678         hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1679                                 HNS3_RING_TX_RING_TAIL_REG);
1680         fbd_num = readl_relaxed(tx_ring->tqp->io_base +
1681                                 HNS3_RING_TX_RING_FBDNUM_REG);
1682         fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
1683                                 HNS3_RING_TX_RING_OFFSET_REG);
1684         ebd_num = readl_relaxed(tx_ring->tqp->io_base +
1685                                 HNS3_RING_TX_RING_EBDNUM_REG);
1686         ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
1687                                 HNS3_RING_TX_RING_EBD_OFFSET_REG);
1688         bd_num = readl_relaxed(tx_ring->tqp->io_base +
1689                                HNS3_RING_TX_RING_BD_NUM_REG);
1690         bd_err = readl_relaxed(tx_ring->tqp->io_base +
1691                                HNS3_RING_TX_RING_BD_ERR_REG);
1692         ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
1693         tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
1694
1695         netdev_info(ndev,
1696                     "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
1697                     bd_num, hw_head, hw_tail, bd_err,
1698                     readl(tx_ring->tqp_vector->mask_addr));
1699         netdev_info(ndev,
1700                     "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
1701                     ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
1702
1703         return true;
1704 }
1705
1706 static void hns3_nic_net_timeout(struct net_device *ndev)
1707 {
1708         struct hns3_nic_priv *priv = netdev_priv(ndev);
1709         struct hnae3_handle *h = priv->ae_handle;
1710
1711         if (!hns3_get_tx_timeo_queue_info(ndev))
1712                 return;
1713
1714         /* request the reset, and let the hclge determine
1715          * which reset level should be done
1716          */
1717         if (h->ae_algo->ops->reset_event)
1718                 h->ae_algo->ops->reset_event(h->pdev, h);
1719 }
1720
1721 #ifdef CONFIG_RFS_ACCEL
1722 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1723                               u16 rxq_index, u32 flow_id)
1724 {
1725         struct hnae3_handle *h = hns3_get_handle(dev);
1726         struct flow_keys fkeys;
1727
1728         if (!h->ae_algo->ops->add_arfs_entry)
1729                 return -EOPNOTSUPP;
1730
1731         if (skb->encapsulation)
1732                 return -EPROTONOSUPPORT;
1733
1734         if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
1735                 return -EPROTONOSUPPORT;
1736
1737         if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
1738              fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
1739             (fkeys.basic.ip_proto != IPPROTO_TCP &&
1740              fkeys.basic.ip_proto != IPPROTO_UDP))
1741                 return -EPROTONOSUPPORT;
1742
1743         return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
1744 }
1745 #endif
1746
1747 static const struct net_device_ops hns3_nic_netdev_ops = {
1748         .ndo_open               = hns3_nic_net_open,
1749         .ndo_stop               = hns3_nic_net_stop,
1750         .ndo_start_xmit         = hns3_nic_net_xmit,
1751         .ndo_tx_timeout         = hns3_nic_net_timeout,
1752         .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1753         .ndo_do_ioctl           = hns3_nic_do_ioctl,
1754         .ndo_change_mtu         = hns3_nic_change_mtu,
1755         .ndo_set_features       = hns3_nic_set_features,
1756         .ndo_get_stats64        = hns3_nic_get_stats64,
1757         .ndo_setup_tc           = hns3_nic_setup_tc,
1758         .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1759         .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1760         .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1761         .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1762 #ifdef CONFIG_RFS_ACCEL
1763         .ndo_rx_flow_steer      = hns3_rx_flow_steer,
1764 #endif
1765
1766 };
1767
1768 bool hns3_is_phys_func(struct pci_dev *pdev)
1769 {
1770         u32 dev_id = pdev->device;
1771
1772         switch (dev_id) {
1773         case HNAE3_DEV_ID_GE:
1774         case HNAE3_DEV_ID_25GE:
1775         case HNAE3_DEV_ID_25GE_RDMA:
1776         case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1777         case HNAE3_DEV_ID_50GE_RDMA:
1778         case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1779         case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1780                 return true;
1781         case HNAE3_DEV_ID_100G_VF:
1782         case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1783                 return false;
1784         default:
1785                 dev_warn(&pdev->dev, "unrecognized pci device-id %d",
1786                          dev_id);
1787         }
1788
1789         return false;
1790 }
1791
1792 static void hns3_disable_sriov(struct pci_dev *pdev)
1793 {
1794         /* If our VFs are assigned we cannot shut down SR-IOV
1795          * without causing issues, so just leave the hardware
1796          * available but disabled
1797          */
1798         if (pci_vfs_assigned(pdev)) {
1799                 dev_warn(&pdev->dev,
1800                          "disabling driver while VFs are assigned\n");
1801                 return;
1802         }
1803
1804         pci_disable_sriov(pdev);
1805 }
1806
1807 static void hns3_get_dev_capability(struct pci_dev *pdev,
1808                                     struct hnae3_ae_dev *ae_dev)
1809 {
1810         if (pdev->revision >= 0x21) {
1811                 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1812                 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
1813         }
1814 }
1815
1816 /* hns3_probe - Device initialization routine
1817  * @pdev: PCI device information struct
1818  * @ent: entry in hns3_pci_tbl
1819  *
1820  * hns3_probe initializes a PF identified by a pci_dev structure.
1821  * The OS initialization, configuring of the PF private structure,
1822  * and a hardware reset occur.
1823  *
1824  * Returns 0 on success, negative on failure
1825  */
1826 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1827 {
1828         struct hnae3_ae_dev *ae_dev;
1829         int ret;
1830
1831         ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1832                               GFP_KERNEL);
1833         if (!ae_dev) {
1834                 ret = -ENOMEM;
1835                 return ret;
1836         }
1837
1838         ae_dev->pdev = pdev;
1839         ae_dev->flag = ent->driver_data;
1840         ae_dev->dev_type = HNAE3_DEV_KNIC;
1841         ae_dev->reset_type = HNAE3_NONE_RESET;
1842         hns3_get_dev_capability(pdev, ae_dev);
1843         pci_set_drvdata(pdev, ae_dev);
1844
1845         ret = hnae3_register_ae_dev(ae_dev);
1846         if (ret) {
1847                 devm_kfree(&pdev->dev, ae_dev);
1848                 pci_set_drvdata(pdev, NULL);
1849         }
1850
1851         return ret;
1852 }
1853
1854 /* hns3_remove - Device removal routine
1855  * @pdev: PCI device information struct
1856  */
1857 static void hns3_remove(struct pci_dev *pdev)
1858 {
1859         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1860
1861         if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1862                 hns3_disable_sriov(pdev);
1863
1864         hnae3_unregister_ae_dev(ae_dev);
1865         pci_set_drvdata(pdev, NULL);
1866 }
1867
1868 /**
1869  * hns3_pci_sriov_configure
1870  * @pdev: pointer to a pci_dev structure
1871  * @num_vfs: number of VFs to allocate
1872  *
1873  * Enable or change the number of VFs. Called when the user updates the number
1874  * of VFs in sysfs.
1875  **/
1876 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1877 {
1878         int ret;
1879
1880         if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1881                 dev_warn(&pdev->dev, "Cannot config SRIOV\n");
1882                 return -EINVAL;
1883         }
1884
1885         if (num_vfs) {
1886                 ret = pci_enable_sriov(pdev, num_vfs);
1887                 if (ret)
1888                         dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1889                 else
1890                         return num_vfs;
1891         } else if (!pci_vfs_assigned(pdev)) {
1892                 pci_disable_sriov(pdev);
1893         } else {
1894                 dev_warn(&pdev->dev,
1895                          "Unable to free VFs because some are assigned to VMs.\n");
1896         }
1897
1898         return 0;
1899 }
1900
1901 static void hns3_shutdown(struct pci_dev *pdev)
1902 {
1903         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1904
1905         hnae3_unregister_ae_dev(ae_dev);
1906         devm_kfree(&pdev->dev, ae_dev);
1907         pci_set_drvdata(pdev, NULL);
1908
1909         if (system_state == SYSTEM_POWER_OFF)
1910                 pci_set_power_state(pdev, PCI_D3hot);
1911 }
1912
1913 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1914                                             pci_channel_state_t state)
1915 {
1916         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1917         pci_ers_result_t ret;
1918
1919         dev_info(&pdev->dev, "PCI error detected, state = %d\n", state);
1920
1921         if (state == pci_channel_io_perm_failure)
1922                 return PCI_ERS_RESULT_DISCONNECT;
1923
1924         if (!ae_dev || !ae_dev->ops) {
1925                 dev_err(&pdev->dev,
1926                         "Can't recover - error happened before device initialized\n");
1927                 return PCI_ERS_RESULT_NONE;
1928         }
1929
1930         if (ae_dev->ops->handle_hw_ras_error)
1931                 ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
1932         else
1933                 return PCI_ERS_RESULT_NONE;
1934
1935         return ret;
1936 }
1937
1938 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1939 {
1940         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1941         struct device *dev = &pdev->dev;
1942
1943         dev_info(dev, "requesting reset due to PCI error\n");
1944
1945         if (!ae_dev || !ae_dev->ops)
1946                 return PCI_ERS_RESULT_NONE;
1947
1948         /* request the reset */
1949         if (ae_dev->ops->reset_event) {
1950                 if (!ae_dev->override_pci_need_reset)
1951                         ae_dev->ops->reset_event(pdev, NULL);
1952
1953                 return PCI_ERS_RESULT_RECOVERED;
1954         }
1955
1956         return PCI_ERS_RESULT_DISCONNECT;
1957 }
1958
1959 static void hns3_reset_prepare(struct pci_dev *pdev)
1960 {
1961         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1962
1963         dev_info(&pdev->dev, "hns3 flr prepare\n");
1964         if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
1965                 ae_dev->ops->flr_prepare(ae_dev);
1966 }
1967
1968 static void hns3_reset_done(struct pci_dev *pdev)
1969 {
1970         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1971
1972         dev_info(&pdev->dev, "hns3 flr done\n");
1973         if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
1974                 ae_dev->ops->flr_done(ae_dev);
1975 }
1976
1977 static const struct pci_error_handlers hns3_err_handler = {
1978         .error_detected = hns3_error_detected,
1979         .slot_reset     = hns3_slot_reset,
1980         .reset_prepare  = hns3_reset_prepare,
1981         .reset_done     = hns3_reset_done,
1982 };
1983
1984 static struct pci_driver hns3_driver = {
1985         .name     = hns3_driver_name,
1986         .id_table = hns3_pci_tbl,
1987         .probe    = hns3_probe,
1988         .remove   = hns3_remove,
1989         .shutdown = hns3_shutdown,
1990         .sriov_configure = hns3_pci_sriov_configure,
1991         .err_handler    = &hns3_err_handler,
1992 };
1993
1994 /* set the default features for the hns3 netdev */
1995 static void hns3_set_default_feature(struct net_device *netdev)
1996 {
1997         struct hnae3_handle *h = hns3_get_handle(netdev);
1998         struct pci_dev *pdev = h->pdev;
1999
2000         netdev->priv_flags |= IFF_UNICAST_FLT;
2001
2002         netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2003                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2004                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2005                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2006                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2007
2008         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2009
2010         netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2011
2012         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2013                 NETIF_F_HW_VLAN_CTAG_FILTER |
2014                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2015                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2016                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2017                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2018                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2019
2020         netdev->vlan_features |=
2021                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2022                 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
2023                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2024                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2025                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2026
2027         netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2028                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2029                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2030                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2031                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2032                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2033
2034         if (pdev->revision >= 0x21) {
2035                 netdev->hw_features |= NETIF_F_GRO_HW;
2036                 netdev->features |= NETIF_F_GRO_HW;
2037
2038                 if (!(h->flags & HNAE3_SUPPORT_VF)) {
2039                         netdev->hw_features |= NETIF_F_NTUPLE;
2040                         netdev->features |= NETIF_F_NTUPLE;
2041                 }
2042         }
2043 }
2044
2045 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
2046                              struct hns3_desc_cb *cb)
2047 {
2048         unsigned int order = hnae3_page_order(ring);
2049         struct page *p;
2050
2051         p = dev_alloc_pages(order);
2052         if (!p)
2053                 return -ENOMEM;
2054
2055         cb->priv = p;
2056         cb->page_offset = 0;
2057         cb->reuse_flag = 0;
2058         cb->buf  = page_address(p);
2059         cb->length = hnae3_page_size(ring);
2060         cb->type = DESC_TYPE_PAGE;
2061
2062         return 0;
2063 }
2064
2065 static void hns3_free_buffer(struct hns3_enet_ring *ring,
2066                              struct hns3_desc_cb *cb)
2067 {
2068         if (cb->type == DESC_TYPE_SKB)
2069                 dev_kfree_skb_any((struct sk_buff *)cb->priv);
2070         else if (!HNAE3_IS_TX_RING(ring))
2071                 put_page((struct page *)cb->priv);
2072         memset(cb, 0, sizeof(*cb));
2073 }
2074
2075 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
2076 {
2077         cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
2078                                cb->length, ring_to_dma_dir(ring));
2079
2080         if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
2081                 return -EIO;
2082
2083         return 0;
2084 }
2085
2086 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
2087                               struct hns3_desc_cb *cb)
2088 {
2089         if (cb->type == DESC_TYPE_SKB)
2090                 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
2091                                  ring_to_dma_dir(ring));
2092         else if (cb->length)
2093                 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
2094                                ring_to_dma_dir(ring));
2095 }
2096
2097 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2098 {
2099         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2100         ring->desc[i].addr = 0;
2101 }
2102
2103 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
2104 {
2105         struct hns3_desc_cb *cb = &ring->desc_cb[i];
2106
2107         if (!ring->desc_cb[i].dma)
2108                 return;
2109
2110         hns3_buffer_detach(ring, i);
2111         hns3_free_buffer(ring, cb);
2112 }
2113
2114 static void hns3_free_buffers(struct hns3_enet_ring *ring)
2115 {
2116         int i;
2117
2118         for (i = 0; i < ring->desc_num; i++)
2119                 hns3_free_buffer_detach(ring, i);
2120 }
2121
2122 /* free desc along with its attached buffer */
2123 static void hns3_free_desc(struct hns3_enet_ring *ring)
2124 {
2125         int size = ring->desc_num * sizeof(ring->desc[0]);
2126
2127         hns3_free_buffers(ring);
2128
2129         if (ring->desc) {
2130                 dma_free_coherent(ring_to_dev(ring), size,
2131                                   ring->desc, ring->desc_dma_addr);
2132                 ring->desc = NULL;
2133         }
2134 }
2135
2136 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2137 {
2138         int size = ring->desc_num * sizeof(ring->desc[0]);
2139
2140         ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2141                                         &ring->desc_dma_addr, GFP_KERNEL);
2142         if (!ring->desc)
2143                 return -ENOMEM;
2144
2145         return 0;
2146 }
2147
2148 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
2149                                    struct hns3_desc_cb *cb)
2150 {
2151         int ret;
2152
2153         ret = hns3_alloc_buffer(ring, cb);
2154         if (ret)
2155                 goto out;
2156
2157         ret = hns3_map_buffer(ring, cb);
2158         if (ret)
2159                 goto out_with_buf;
2160
2161         return 0;
2162
2163 out_with_buf:
2164         hns3_free_buffer(ring, cb);
2165 out:
2166         return ret;
2167 }
2168
2169 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2170 {
2171         int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2172
2173         if (ret)
2174                 return ret;
2175
2176         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2177
2178         return 0;
2179 }
2180
2181 /* Allocate buffer memory for raw packets, and map it for DMA */
2182 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2183 {
2184         int i, j, ret;
2185
2186         for (i = 0; i < ring->desc_num; i++) {
2187                 ret = hns3_alloc_buffer_attach(ring, i);
2188                 if (ret)
2189                         goto out_buffer_fail;
2190         }
2191
2192         return 0;
2193
2194 out_buffer_fail:
2195         for (j = i - 1; j >= 0; j--)
2196                 hns3_free_buffer_detach(ring, j);
2197         return ret;
2198 }
2199
2200 /* detach an in-use buffer and replace it with a reserved one */
2201 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2202                                 struct hns3_desc_cb *res_cb)
2203 {
2204         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2205         ring->desc_cb[i] = *res_cb;
2206         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2207         ring->desc[i].rx.bd_base_info = 0;
2208 }
2209
2210 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2211 {
2212         ring->desc_cb[i].reuse_flag = 0;
2213         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2214                 + ring->desc_cb[i].page_offset);
2215         ring->desc[i].rx.bd_base_info = 0;
2216 }
2217
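/* Reclaim the TX descriptors that hardware has finished sending, i.e.
 * everything from next_to_clean up to (but not including) head, while
 * accumulating the completed byte and packet counts for the byte queue
 * limits accounting and the ring statistics.
 */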
2218 static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
2219                                   int *bytes, int *pkts)
2220 {
2221         int ntc = ring->next_to_clean;
2222         struct hns3_desc_cb *desc_cb;
2223
2224         while (head != ntc) {
2225                 desc_cb = &ring->desc_cb[ntc];
2226                 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2227                 (*bytes) += desc_cb->length;
2228                 /* desc_cb will be cleaned by hns3_free_buffer_detach below */
2229                 hns3_free_buffer_detach(ring, ntc);
2230
2231                 if (++ntc == ring->desc_num)
2232                         ntc = 0;
2233
2234                 /* Issue prefetch for next Tx descriptor */
2235                 prefetch(&ring->desc_cb[ntc]);
2236         }
2237
2238         /* This smp_store_release() pairs with smp_load_acquire() in
2239          * ring_space called by hns3_nic_net_xmit.
2240          */
2241         smp_store_release(&ring->next_to_clean, ntc);
2242 }
2243
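/* A head value reported by hardware is only sane if it lies within the
 * range currently owned by hardware, i.e. in (next_to_clean, next_to_use]
 * in ring order. For example, with desc_num = 8, next_to_clean = 6 and
 * next_to_use = 2, the valid head values are 7, 0, 1 and 2.
 */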
2244 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2245 {
2246         int u = ring->next_to_use;
2247         int c = ring->next_to_clean;
2248
2249         if (unlikely(h > ring->desc_num))
2250                 return 0;
2251
2252         return u > c ? (h > c && h <= u) : (h > c || h <= u);
2253 }
2254
2255 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2256 {
2257         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2258         struct hns3_nic_priv *priv = netdev_priv(netdev);
2259         struct netdev_queue *dev_queue;
2260         int bytes, pkts;
2261         int head;
2262
2263         head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2264         rmb(); /* Make sure head is ready before touching any data */
2265
2266         if (is_ring_empty(ring) || head == ring->next_to_clean)
2267                 return; /* no data to poll */
2268
2269         if (unlikely(!is_valid_clean_head(ring, head))) {
2270                 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2271                            ring->next_to_use, ring->next_to_clean);
2272
2273                 u64_stats_update_begin(&ring->syncp);
2274                 ring->stats.io_err_cnt++;
2275                 u64_stats_update_end(&ring->syncp);
2276                 return;
2277         }
2278
2279         bytes = 0;
2280         pkts = 0;
2281         hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
2282
2283         ring->tqp_vector->tx_group.total_bytes += bytes;
2284         ring->tqp_vector->tx_group.total_packets += pkts;
2285
2286         u64_stats_update_begin(&ring->syncp);
2287         ring->stats.tx_bytes += bytes;
2288         ring->stats.tx_pkts += pkts;
2289         u64_stats_update_end(&ring->syncp);
2290
2291         dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2292         netdev_tx_completed_queue(dev_queue, pkts, bytes);
2293
2294         if (unlikely(pkts && netif_carrier_ok(netdev) &&
2295                      (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2296                 /* Make sure that anybody stopping the queue after this
2297                  * sees the new next_to_clean.
2298                  */
2299                 smp_mb();
2300                 if (netif_tx_queue_stopped(dev_queue) &&
2301                     !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2302                         netif_tx_wake_queue(dev_queue);
2303                         ring->stats.restart_queue++;
2304                 }
2305         }
2306 }
2307
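/* Number of RX descriptors that have been cleaned but not yet refilled
 * with a fresh buffer, i.e. the ring distance from next_to_use forward
 * to next_to_clean. For example, with desc_num = 512, next_to_use = 10
 * and next_to_clean = 4, there are 512 + 4 - 10 = 506 unused descriptors.
 */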
2308 static int hns3_desc_unused(struct hns3_enet_ring *ring)
2309 {
2310         int ntc = ring->next_to_clean;
2311         int ntu = ring->next_to_use;
2312
2313         return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2314 }
2315
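/* Refill the given number of RX descriptors, either by reusing the page
 * already attached to the descriptor or by mapping a freshly allocated
 * one, then update the head register so hardware sees the new buffers.
 */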
2316 static void
2317 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2318 {
2319         struct hns3_desc_cb *desc_cb;
2320         struct hns3_desc_cb res_cbs;
2321         int i, ret;
2322
2323         for (i = 0; i < cleaned_count; i++) {
2324                 desc_cb = &ring->desc_cb[ring->next_to_use];
2325                 if (desc_cb->reuse_flag) {
2326                         u64_stats_update_begin(&ring->syncp);
2327                         ring->stats.reuse_pg_cnt++;
2328                         u64_stats_update_end(&ring->syncp);
2329
2330                         hns3_reuse_buffer(ring, ring->next_to_use);
2331                 } else {
2332                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
2333                         if (ret) {
2334                                 u64_stats_update_begin(&ring->syncp);
2335                                 ring->stats.sw_err_cnt++;
2336                                 u64_stats_update_end(&ring->syncp);
2337
2338                                 netdev_err(ring->tqp->handle->kinfo.netdev,
2339                                            "hnae reserve buffer map failed.\n");
2340                                 break;
2341                         }
2342                         hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2343
2344                         u64_stats_update_begin(&ring->syncp);
2345                         ring->stats.non_reuse_pg++;
2346                         u64_stats_update_end(&ring->syncp);
2347                 }
2348
2349                 ring_ptr_move_fw(ring, next_to_use);
2350         }
2351
2352         wmb(); /* Make sure all data has been written before submitting to hardware */
2353         writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2354 }
2355
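/* Attach the current RX buffer to the skb as a page fragment and decide
 * whether the page can be reused for a later descriptor: only local
 * pages whose reference count allows it are flagged for reuse.
 */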
2356 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2357                                 struct hns3_enet_ring *ring, int pull_len,
2358                                 struct hns3_desc_cb *desc_cb)
2359 {
2360         struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2361         int size = le16_to_cpu(desc->rx.size);
2362         u32 truesize = hnae3_buf_size(ring);
2363
2364         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2365                         size - pull_len, truesize);
2366
2367         /* Avoid re-using remote pages, or pages the stack is still using
2368          * when page_offset rolls back to zero; keep the default no-reuse flag
2369          */
2370         if (unlikely(page_to_nid(desc_cb->priv) != numa_mem_id()) ||
2371             (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
2372                 return;
2373
2374         /* Move offset up to the next buffer in the page */
2375         desc_cb->page_offset += truesize;
2376
2377         if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
2378                 desc_cb->reuse_flag = 1;
2379                 /* Bump the ref count on the page before it is given out */
2380                 get_page(desc_cb->priv);
2381         } else if (page_count(desc_cb->priv) == 1) {
2382                 desc_cb->reuse_flag = 1;
2383                 desc_cb->page_offset = 0;
2384                 get_page(desc_cb->priv);
2385         }
2386 }
2387
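/* Finish a packet coalesced by hardware GRO: walk any VLAN headers,
 * locate the L3/L4 headers, set up the TCP pseudo-header checksum and
 * fill in the GSO fields so the coalesced skb can be segmented again
 * (e.g. when it is forwarded).
 */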
2388 static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
2389 {
2390         __be16 type = skb->protocol;
2391         struct tcphdr *th;
2392         int depth = 0;
2393
2394         while (eth_type_vlan(type)) {
2395                 struct vlan_hdr *vh;
2396
2397                 if ((depth + VLAN_HLEN) > skb_headlen(skb))
2398                         return -EFAULT;
2399
2400                 vh = (struct vlan_hdr *)(skb->data + depth);
2401                 type = vh->h_vlan_encapsulated_proto;
2402                 depth += VLAN_HLEN;
2403         }
2404
2405         skb_set_network_header(skb, depth);
2406
2407         if (type == htons(ETH_P_IP)) {
2408                 const struct iphdr *iph = ip_hdr(skb);
2409
2410                 depth += sizeof(struct iphdr);
2411                 skb_set_transport_header(skb, depth);
2412                 th = tcp_hdr(skb);
2413                 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
2414                                           iph->daddr, 0);
2415         } else if (type == htons(ETH_P_IPV6)) {
2416                 const struct ipv6hdr *iph = ipv6_hdr(skb);
2417
2418                 depth += sizeof(struct ipv6hdr);
2419                 skb_set_transport_header(skb, depth);
2420                 th = tcp_hdr(skb);
2421                 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
2422                                           &iph->daddr, 0);
2423         } else {
2424                 netdev_err(skb->dev,
2425                            "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
2426                            be16_to_cpu(type), depth);
2427                 return -EFAULT;
2428         }
2429
2430         skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2431         if (th->cwr)
2432                 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2433
2434         if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
2435                 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
2436
2437         skb->csum_start = (unsigned char *)th - skb->head;
2438         skb->csum_offset = offsetof(struct tcphdr, check);
2439         skb->ip_summed = CHECKSUM_PARTIAL;
2440         return 0;
2441 }
2442
2443 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2444                              u32 l234info, u32 bd_base_info, u32 ol_info)
2445 {
2446         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2447         int l3_type, l4_type;
2448         int ol4_type;
2449
2450         skb->ip_summed = CHECKSUM_NONE;
2451
2452         skb_checksum_none_assert(skb);
2453
2454         if (!(netdev->features & NETIF_F_RXCSUM))
2455                 return;
2456
2457         /* check if hardware has done checksum */
2458         if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2459                 return;
2460
2461         if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2462                                  BIT(HNS3_RXD_OL3E_B) |
2463                                  BIT(HNS3_RXD_OL4E_B)))) {
2464                 u64_stats_update_begin(&ring->syncp);
2465                 ring->stats.l3l4_csum_err++;
2466                 u64_stats_update_end(&ring->syncp);
2467
2468                 return;
2469         }
2470
2471         ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
2472                                    HNS3_RXD_OL4ID_S);
2473         switch (ol4_type) {
2474         case HNS3_OL4_TYPE_MAC_IN_UDP:
2475         case HNS3_OL4_TYPE_NVGRE:
2476                 skb->csum_level = 1;
2477                 /* fall through */
2478         case HNS3_OL4_TYPE_NO_TUN:
2479                 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2480                                           HNS3_RXD_L3ID_S);
2481                 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2482                                           HNS3_RXD_L4ID_S);
2483
2484                 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2485                 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2486                      l3_type == HNS3_L3_TYPE_IPV6) &&
2487                     (l4_type == HNS3_L4_TYPE_UDP ||
2488                      l4_type == HNS3_L4_TYPE_TCP ||
2489                      l4_type == HNS3_L4_TYPE_SCTP))
2490                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2491                 break;
2492         default:
2493                 break;
2494         }
2495 }
2496
2497 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2498 {
2499         if (skb_has_frag_list(skb))
2500                 napi_gro_flush(&ring->tqp_vector->napi, false);
2501
2502         napi_gro_receive(&ring->tqp_vector->napi, skb);
2503 }
2504
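/* Extract the VLAN tag that hardware stripped from the received packet.
 * Returns true and fills *vlan_tag when a tag should be reported to the
 * stack, false otherwise.
 */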
2505 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2506                                 struct hns3_desc *desc, u32 l234info,
2507                                 u16 *vlan_tag)
2508 {
2509         struct hnae3_handle *handle = ring->tqp->handle;
2510         struct pci_dev *pdev = ring->tqp->handle->pdev;
2511
2512         if (pdev->revision == 0x20) {
2513                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2514                 if (!(*vlan_tag & VLAN_VID_MASK))
2515                         *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2516
2517                 return (*vlan_tag != 0);
2518         }
2519
2520 #define HNS3_STRP_OUTER_VLAN    0x1
2521 #define HNS3_STRP_INNER_VLAN    0x2
2522 #define HNS3_STRP_BOTH          0x3
2523
2524         /* Hardware always inserts the VLAN tag into the RX descriptor when
2525          * it strips the tag from the packet, so the driver needs to determine
2526          * which tag to report to the stack.
2527          */
2528         switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2529                                 HNS3_RXD_STRP_TAGP_S)) {
2530         case HNS3_STRP_OUTER_VLAN:
2531                 if (handle->port_base_vlan_state !=
2532                                 HNAE3_PORT_BASE_VLAN_DISABLE)
2533                         return false;
2534
2535                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2536                 return true;
2537         case HNS3_STRP_INNER_VLAN:
2538                 if (handle->port_base_vlan_state !=
2539                                 HNAE3_PORT_BASE_VLAN_DISABLE)
2540                         return false;
2541
2542                 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2543                 return true;
2544         case HNS3_STRP_BOTH:
2545                 if (handle->port_base_vlan_state ==
2546                                 HNAE3_PORT_BASE_VLAN_DISABLE)
2547                         *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2548                 else
2549                         *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2550
2551                 return true;
2552         default:
2553                 return false;
2554         }
2555 }
2556
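/* Allocate an skb for the first BD of a packet. Small packets are copied
 * into the skb head entirely; for larger ones only the header is pulled
 * and HNS3_NEED_ADD_FRAG tells the caller to chain the remaining BDs.
 */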
2557 static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
2558                           unsigned char *va)
2559 {
2560 #define HNS3_NEED_ADD_FRAG      1
2561         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2562         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2563         struct sk_buff *skb;
2564
2565         ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
2566         skb = ring->skb;
2567         if (unlikely(!skb)) {
2568                 netdev_err(netdev, "alloc rx skb fail\n");
2569
2570                 u64_stats_update_begin(&ring->syncp);
2571                 ring->stats.sw_err_cnt++;
2572                 u64_stats_update_end(&ring->syncp);
2573
2574                 return -ENOMEM;
2575         }
2576
2577         prefetchw(skb->data);
2578
2579         ring->pending_buf = 1;
2580         ring->frag_num = 0;
2581         ring->tail_skb = NULL;
2582         if (length <= HNS3_RX_HEAD_SIZE) {
2583                 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2584
2585                 /* We can reuse buffer as-is, just make sure it is local */
2586                 if (likely(page_to_nid(desc_cb->priv) == numa_mem_id()))
2587                         desc_cb->reuse_flag = 1;
2588                 else /* This page cannot be reused so discard it */
2589                         put_page(desc_cb->priv);
2590
2591                 ring_ptr_move_fw(ring, next_to_clean);
2592                 return 0;
2593         }
2594         u64_stats_update_begin(&ring->syncp);
2595         ring->stats.seg_pkt_cnt++;
2596         u64_stats_update_end(&ring->syncp);
2597
2598         ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
2599         __skb_put(skb, ring->pull_len);
2600         hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
2601                             desc_cb);
2602         ring_ptr_move_fw(ring, next_to_clean);
2603
2604         return HNS3_NEED_ADD_FRAG;
2605 }
2606
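/* Chain the remaining BDs of a packet onto the skb as page fragments,
 * spilling over into a frag_list once MAX_SKB_FRAGS is reached, until
 * the BD carrying the frame-end (FE) bit has been consumed.
 */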
2607 static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
2608                          struct sk_buff **out_skb, bool pending)
2609 {
2610         struct sk_buff *skb = *out_skb;
2611         struct sk_buff *head_skb = *out_skb;
2612         struct sk_buff *new_skb;
2613         struct hns3_desc_cb *desc_cb;
2614         struct hns3_desc *pre_desc;
2615         u32 bd_base_info;
2616         int pre_bd;
2617
2618         /* if there is a pending BD, next_to_clean has already moved past
2619          * it, so take bd_base_info from the previous BD
2620          */
2621         if (pending) {
2622                 pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
2623                         ring->desc_num;
2624                 pre_desc = &ring->desc[pre_bd];
2625                 bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
2626         } else {
2627                 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2628         }
2629
2630         while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2631                 desc = &ring->desc[ring->next_to_clean];
2632                 desc_cb = &ring->desc_cb[ring->next_to_clean];
2633                 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2634                 /* make sure HW has finished writing the descriptor */
2635                 dma_rmb();
2636                 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
2637                         return -ENXIO;
2638
2639                 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
2640                         new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2641                                                  HNS3_RX_HEAD_SIZE);
2642                         if (unlikely(!new_skb)) {
2643                                 netdev_err(ring->tqp->handle->kinfo.netdev,
2644                                            "alloc rx skb frag fail\n");
2645                                 return -ENXIO;
2646                         }
2647                         ring->frag_num = 0;
2648
2649                         if (ring->tail_skb) {
2650                                 ring->tail_skb->next = new_skb;
2651                                 ring->tail_skb = new_skb;
2652                         } else {
2653                                 skb_shinfo(skb)->frag_list = new_skb;
2654                                 ring->tail_skb = new_skb;
2655                         }
2656                 }
2657
2658                 if (ring->tail_skb) {
2659                         head_skb->truesize += hnae3_buf_size(ring);
2660                         head_skb->data_len += le16_to_cpu(desc->rx.size);
2661                         head_skb->len += le16_to_cpu(desc->rx.size);
2662                         skb = ring->tail_skb;
2663                 }
2664
2665                 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
2666                 ring_ptr_move_fw(ring, next_to_clean);
2667                 ring->pending_buf++;
2668         }
2669
2670         return 0;
2671 }
2672
2673 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
2674                                      struct sk_buff *skb, u32 l234info,
2675                                      u32 bd_base_info, u32 ol_info)
2676 {
2677         u32 l3_type;
2678
2679         skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
2680                                                     HNS3_RXD_GRO_SIZE_M,
2681                                                     HNS3_RXD_GRO_SIZE_S);
2682         /* if there is no HW GRO, do not set gro params */
2683         if (!skb_shinfo(skb)->gso_size) {
2684                 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
2685                 return 0;
2686         }
2687
2688         NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
2689                                                   HNS3_RXD_GRO_COUNT_M,
2690                                                   HNS3_RXD_GRO_COUNT_S);
2691
2692         l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2693                                   HNS3_RXD_L3ID_S);
2694         if (l3_type == HNS3_L3_TYPE_IPV4)
2695                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2696         else if (l3_type == HNS3_L3_TYPE_IPV6)
2697                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2698         else
2699                 return -EFAULT;
2700
2701         return hns3_gro_complete(skb, l234info);
2702 }
2703
2704 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2705                                      struct sk_buff *skb, u32 rss_hash)
2706 {
2707         struct hnae3_handle *handle = ring->tqp->handle;
2708         enum pkt_hash_types rss_type;
2709
2710         if (rss_hash)
2711                 rss_type = handle->kinfo.rss_type;
2712         else
2713                 rss_type = PKT_HASH_TYPE_NONE;
2714
2715         skb_set_hash(skb, rss_hash, rss_type);
2716 }
2717
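/* Post-process a fully received packet using the info in its last BD:
 * report the stripped VLAN tag, reject invalid or truncated frames, set
 * up GRO/checksum state and update the RX statistics.
 */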
2718 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
2719 {
2720         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2721         enum hns3_pkt_l2t_type l2_frame_type;
2722         u32 bd_base_info, l234info, ol_info;
2723         struct hns3_desc *desc;
2724         unsigned int len;
2725         int pre_ntc, ret;
2726
2727         /* The bdinfo handled below is only valid on the last BD of the
2728          * current packet, and ring->next_to_clean indicates the first
2729          * descriptor of the next packet, so subtract 1 below.
2730          */
2731         pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
2732                                         (ring->desc_num - 1);
2733         desc = &ring->desc[pre_ntc];
2734         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2735         l234info = le32_to_cpu(desc->rx.l234_info);
2736         ol_info = le32_to_cpu(desc->rx.ol_info);
2737
2738         /* Based on the HW strategy, the offloaded tag is stored in
2739          * ot_vlan_tag in the two-layer-tag case, and in vlan_tag in
2740          * the one-layer-tag case.
2741          */
2742         if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2743                 u16 vlan_tag;
2744
2745                 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2746                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2747                                                vlan_tag);
2748         }
2749
2750         if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
2751                 u64_stats_update_begin(&ring->syncp);
2752                 ring->stats.non_vld_descs++;
2753                 u64_stats_update_end(&ring->syncp);
2754
2755                 return -EINVAL;
2756         }
2757
2758         if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
2759                                   BIT(HNS3_RXD_L2E_B))))) {
2760                 u64_stats_update_begin(&ring->syncp);
2761                 if (l234info & BIT(HNS3_RXD_L2E_B))
2762                         ring->stats.l2_err++;
2763                 else
2764                         ring->stats.err_pkt_len++;
2765                 u64_stats_update_end(&ring->syncp);
2766
2767                 return -EFAULT;
2768         }
2769
2770         len = skb->len;
2771
2772         /* Set the protocol before the skb is handed to the IP stack */
2773         skb->protocol = eth_type_trans(skb, netdev);
2774
2775         /* This is needed in order to enable forwarding support */
2776         ret = hns3_set_gro_and_checksum(ring, skb, l234info,
2777                                         bd_base_info, ol_info);
2778         if (unlikely(ret)) {
2779                 u64_stats_update_begin(&ring->syncp);
2780                 ring->stats.rx_err_cnt++;
2781                 u64_stats_update_end(&ring->syncp);
2782                 return ret;
2783         }
2784
2785         l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
2786                                         HNS3_RXD_DMAC_S);
2787
2788         u64_stats_update_begin(&ring->syncp);
2789         ring->stats.rx_pkts++;
2790         ring->stats.rx_bytes += len;
2791
2792         if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
2793                 ring->stats.rx_multicast++;
2794
2795         u64_stats_update_end(&ring->syncp);
2796
2797         ring->tqp_vector->rx_group.total_bytes += len;
2798
2799         hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
2800         return 0;
2801 }
2802
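/* Handle one RX BD: start a new skb on the first BD of a packet, add
 * fragments for follow-up BDs, and hand the completed skb on for BD info
 * processing. Returns -ENXIO when hardware has not finished writing the
 * packet yet.
 */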
2803 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2804                              struct sk_buff **out_skb)
2805 {
2806         struct sk_buff *skb = ring->skb;
2807         struct hns3_desc_cb *desc_cb;
2808         struct hns3_desc *desc;
2809         u32 bd_base_info;
2810         int length;
2811         int ret;
2812
2813         desc = &ring->desc[ring->next_to_clean];
2814         desc_cb = &ring->desc_cb[ring->next_to_clean];
2815
2816         prefetch(desc);
2817
2818         length = le16_to_cpu(desc->rx.size);
2819         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2820
2821         /* Check valid BD */
2822         if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2823                 return -ENXIO;
2824
2825         if (!skb)
2826                 ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2827
2828         /* Prefetch the first cache line of the first page.
2829          * The idea is to cache a few bytes of the packet header. Our L1 cache
2830          * line size is 64B, so we need to prefetch twice to cover 128B. But
2831          * caches with 128B Level 1 cache lines do exist; in such a case, a
2832          * single prefetch would suffice to bring in the relevant part of the
2833          * header.
2834          */
2835         prefetch(ring->va);
2836 #if L1_CACHE_BYTES < 128
2837         prefetch(ring->va + L1_CACHE_BYTES);
2838 #endif
2839
2840         if (!skb) {
2841                 ret = hns3_alloc_skb(ring, length, ring->va);
2842                 *out_skb = skb = ring->skb;
2843
2844                 if (ret < 0) /* alloc buffer fail */
2845                         return ret;
2846                 if (ret > 0) { /* need to add frags */
2847                         ret = hns3_add_frag(ring, desc, &skb, false);
2848                         if (ret)
2849                                 return ret;
2850
2851                         /* As the head data may be changed when GRO is enabled,
2852                          * copy the head data in after the rest has been received
2853                          */
2854                         memcpy(skb->data, ring->va,
2855                                ALIGN(ring->pull_len, sizeof(long)));
2856                 }
2857         } else {
2858                 ret = hns3_add_frag(ring, desc, &skb, true);
2859                 if (ret)
2860                         return ret;
2861
2862                 /* As the head data may be changed when GRO is enabled, copy
2863                  * the head data in after the rest has been received
2864                  */
2865                 memcpy(skb->data, ring->va,
2866                        ALIGN(ring->pull_len, sizeof(long)));
2867         }
2868
2869         ret = hns3_handle_bdinfo(ring, skb);
2870         if (unlikely(ret)) {
2871                 dev_kfree_skb_any(skb);
2872                 return ret;
2873         }
2874
2875         skb_record_rx_queue(skb, ring->tqp->tqp_index);
2876         *out_skb = skb;
2877
2878         return 0;
2879 }
2880
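/* NAPI RX poll worker: process up to @budget packets from the ring,
 * passing each completed skb to @rx_fn and refilling RX buffers once at
 * least RCB_NOF_ALLOC_RX_BUFF_ONCE descriptors are pending. Returns the
 * number of packets received.
 */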
2881 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2882                        void (*rx_fn)(struct hns3_enet_ring *,
2883                                      struct sk_buff *))
2884 {
2885 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2886         int recv_pkts, recv_bds, clean_count, err;
2887         int unused_count = hns3_desc_unused(ring);
2888         struct sk_buff *skb = ring->skb;
2889         int num;
2890
2891         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2892         rmb(); /* Make sure num is read before any other data is touched */
2893
2894         recv_pkts = 0, recv_bds = 0, clean_count = 0;
2895         num -= unused_count;
2896         unused_count -= ring->pending_buf;
2897
2898         while (recv_pkts < budget && recv_bds < num) {
2899                 /* Reuse or realloc buffers */
2900                 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2901                         hns3_nic_alloc_rx_buffers(ring,
2902                                                   clean_count + unused_count);
2903                         clean_count = 0;
2904                         unused_count = hns3_desc_unused(ring) -
2905                                         ring->pending_buf;
2906                 }
2907
2908                 /* Poll one pkt */
2909                 err = hns3_handle_rx_bd(ring, &skb);
2910                 if (unlikely(!skb)) /* This fault cannot be recovered from */
2911                         goto out;
2912
2913                 if (err == -ENXIO) { /* FE (frame end) not received yet */
2914                         goto out;
2915                 } else if (unlikely(err)) {  /* Skip the erroneous packet */
2916                         recv_bds += ring->pending_buf;
2917                         clean_count += ring->pending_buf;
2918                         ring->skb = NULL;
2919                         ring->pending_buf = 0;
2920                         continue;
2921                 }
2922
2923                 rx_fn(ring, skb);
2924                 recv_bds += ring->pending_buf;
2925                 clean_count += ring->pending_buf;
2926                 ring->skb = NULL;
2927                 ring->pending_buf = 0;
2928
2929                 recv_pkts++;
2930         }
2931
2932 out:
2933         /* Make sure all data has been written before submitting */
2934         if (clean_count + unused_count > 0)
2935                 hns3_nic_alloc_rx_buffers(ring,
2936                                           clean_count + unused_count);
2937
2938         return recv_pkts;
2939 }
2940
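/* hns3_get_new_flow_lvl - estimate the flow level of @ring_group from the
 * bytes and packets counted since the last update and store the result in
 * ring_group->coal.flow_level. Returns false when no time has passed since
 * the last update, true otherwise.
 */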
2941 static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
2942 {
2943 #define HNS3_RX_LOW_BYTE_RATE 10000
2944 #define HNS3_RX_MID_BYTE_RATE 20000
2945 #define HNS3_RX_ULTRA_PACKET_RATE 40
2946
2947         enum hns3_flow_level_range new_flow_level;
2948         struct hns3_enet_tqp_vector *tqp_vector;
2949         int packets_per_msecs, bytes_per_msecs;
2950         u32 time_passed_ms;
2951
2952         tqp_vector = ring_group->ring->tqp_vector;
2953         time_passed_ms =
2954                 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2955         if (!time_passed_ms)
2956                 return false;
2957
2958         do_div(ring_group->total_packets, time_passed_ms);
2959         packets_per_msecs = ring_group->total_packets;
2960
2961         do_div(ring_group->total_bytes, time_passed_ms);
2962         bytes_per_msecs = ring_group->total_bytes;
2963
2964         new_flow_level = ring_group->coal.flow_level;
2965
2966         /* Simple throttle rate management
2967          * 0-10MB/s    lower     (50000 ints/s)
2968          * 10-20MB/s   middle    (20000 ints/s)
2969          * 20-1249MB/s high      (18000 ints/s)
2970          * > 40000pps  ultra     (8000 ints/s)
2971          */
2972         switch (new_flow_level) {
2973         case HNS3_FLOW_LOW:
2974                 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2975                         new_flow_level = HNS3_FLOW_MID;
2976                 break;
2977         case HNS3_FLOW_MID:
2978                 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2979                         new_flow_level = HNS3_FLOW_HIGH;
2980                 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2981                         new_flow_level = HNS3_FLOW_LOW;
2982                 break;
2983         case HNS3_FLOW_HIGH:
2984         case HNS3_FLOW_ULTRA:
2985         default:
2986                 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2987                         new_flow_level = HNS3_FLOW_MID;
2988                 break;
2989         }
2990
2991         if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2992             &tqp_vector->rx_group == ring_group)
2993                 new_flow_level = HNS3_FLOW_ULTRA;
2994
2995         ring_group->total_bytes = 0;
2996         ring_group->total_packets = 0;
2997         ring_group->coal.flow_level = new_flow_level;
2998
2999         return true;
3000 }
3001
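/* hns3_get_new_int_gl - select a new self-adaptive interrupt GL value for
 * @ring_group based on its flow level. Returns true when the GL value
 * needs to be written to hardware.
 */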
3002 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
3003 {
3004         struct hns3_enet_tqp_vector *tqp_vector;
3005         u16 new_int_gl;
3006
3007         if (!ring_group->ring)
3008                 return false;
3009
3010         tqp_vector = ring_group->ring->tqp_vector;
3011         if (!tqp_vector->last_jiffies)
3012                 return false;
3013
3014         if (ring_group->total_packets == 0) {
3015                 ring_group->coal.int_gl = HNS3_INT_GL_50K;
3016                 ring_group->coal.flow_level = HNS3_FLOW_LOW;
3017                 return true;
3018         }
3019
3020         if (!hns3_get_new_flow_lvl(ring_group))
3021                 return false;
3022
3023         new_int_gl = ring_group->coal.int_gl;
3024         switch (ring_group->coal.flow_level) {
3025         case HNS3_FLOW_LOW:
3026                 new_int_gl = HNS3_INT_GL_50K;
3027                 break;
3028         case HNS3_FLOW_MID:
3029                 new_int_gl = HNS3_INT_GL_20K;
3030                 break;
3031         case HNS3_FLOW_HIGH:
3032                 new_int_gl = HNS3_INT_GL_18K;
3033                 break;
3034         case HNS3_FLOW_ULTRA:
3035                 new_int_gl = HNS3_INT_GL_8K;
3036                 break;
3037         default:
3038                 break;
3039         }
3040
3041         if (new_int_gl != ring_group->coal.int_gl) {
3042                 ring_group->coal.int_gl = new_int_gl;
3043                 return true;
3044         }
3045         return false;
3046 }
3047
3048 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
3049 {
3050         struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
3051         struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
3052         bool rx_update, tx_update;
3053
3054         /* update the coalesce parameters at most once every 1000 ms */
3055         if (time_before(jiffies,
3056                         tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
3057                 return;
3058
3059         if (rx_group->coal.gl_adapt_enable) {
3060                 rx_update = hns3_get_new_int_gl(rx_group);
3061                 if (rx_update)
3062                         hns3_set_vector_coalesce_rx_gl(tqp_vector,
3063                                                        rx_group->coal.int_gl);
3064         }
3065
3066         if (tx_group->coal.gl_adapt_enable) {
3067                 tx_update = hns3_get_new_int_gl(tx_group);
3068                 if (tx_update)
3069                         hns3_set_vector_coalesce_tx_gl(tqp_vector,
3070                                                        tx_group->coal.int_gl);
3071         }
3072
3073         tqp_vector->last_jiffies = jiffies;
3074 }
3075
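/* hns3_nic_common_poll - NAPI poll handler shared by all TQP vectors.
 * TX rings are always cleaned in full, the RX budget is divided between
 * the vector's RX rings, and the vector interrupt is only re-enabled once
 * every ring has been cleaned within budget.
 */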
3076 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
3077 {
3078         struct hns3_nic_priv *priv = netdev_priv(napi->dev);
3079         struct hns3_enet_ring *ring;
3080         int rx_pkt_total = 0;
3081
3082         struct hns3_enet_tqp_vector *tqp_vector =
3083                 container_of(napi, struct hns3_enet_tqp_vector, napi);
3084         bool clean_complete = true;
3085         int rx_budget = budget;
3086
3087         if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3088                 napi_complete(napi);
3089                 return 0;
3090         }
3091
3092         /* Since the actual Tx work is minimal, we can give the Tx a larger
3093          * budget and be more aggressive about cleaning up the Tx descriptors.
3094          */
3095         hns3_for_each_ring(ring, tqp_vector->tx_group)
3096                 hns3_clean_tx_ring(ring);
3097
3098         /* make sure the rx ring budget is not smaller than 1 */
3099         if (tqp_vector->num_tqps > 1)
3100                 rx_budget = max(budget / tqp_vector->num_tqps, 1);
3101
3102         hns3_for_each_ring(ring, tqp_vector->rx_group) {
3103                 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
3104                                                     hns3_rx_skb);
3105
3106                 if (rx_cleaned >= rx_budget)
3107                         clean_complete = false;
3108
3109                 rx_pkt_total += rx_cleaned;
3110         }
3111
3112         tqp_vector->rx_group.total_packets += rx_pkt_total;
3113
3114         if (!clean_complete)
3115                 return budget;
3116
3117         if (napi_complete(napi) &&
3118             likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3119                 hns3_update_new_int_gl(tqp_vector);
3120                 hns3_mask_vector_irq(tqp_vector, 1);
3121         }
3122
3123         return rx_pkt_total;
3124 }
3125
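/* hns3_get_vector_ring_chain - build the chain of ring nodes (TX rings
 * first, then RX rings) serviced by @tqp_vector. The chain is handed to
 * the AE layer when mapping or unmapping rings to the vector.
 */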
3126 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3127                                       struct hnae3_ring_chain_node *head)
3128 {
3129         struct pci_dev *pdev = tqp_vector->handle->pdev;
3130         struct hnae3_ring_chain_node *cur_chain = head;
3131         struct hnae3_ring_chain_node *chain;
3132         struct hns3_enet_ring *tx_ring;
3133         struct hns3_enet_ring *rx_ring;
3134
3135         tx_ring = tqp_vector->tx_group.ring;
3136         if (tx_ring) {
3137                 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
3138                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3139                               HNAE3_RING_TYPE_TX);
3140                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3141                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
3142
3143                 cur_chain->next = NULL;
3144
3145                 while (tx_ring->next) {
3146                         tx_ring = tx_ring->next;
3147
3148                         chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
3149                                              GFP_KERNEL);
3150                         if (!chain)
3151                                 goto err_free_chain;
3152
3153                         cur_chain->next = chain;
3154                         chain->tqp_index = tx_ring->tqp->tqp_index;
3155                         hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3156                                       HNAE3_RING_TYPE_TX);
3157                         hnae3_set_field(chain->int_gl_idx,
3158                                         HNAE3_RING_GL_IDX_M,
3159                                         HNAE3_RING_GL_IDX_S,
3160                                         HNAE3_RING_GL_TX);
3161
3162                         cur_chain = chain;
3163                 }
3164         }
3165
3166         rx_ring = tqp_vector->rx_group.ring;
3167         if (!tx_ring && rx_ring) {
3168                 cur_chain->next = NULL;
3169                 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
3170                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3171                               HNAE3_RING_TYPE_RX);
3172                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3173                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3174
3175                 rx_ring = rx_ring->next;
3176         }
3177
3178         while (rx_ring) {
3179                 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
3180                 if (!chain)
3181                         goto err_free_chain;
3182
3183                 cur_chain->next = chain;
3184                 chain->tqp_index = rx_ring->tqp->tqp_index;
3185                 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3186                               HNAE3_RING_TYPE_RX);
3187                 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3188                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3189
3190                 cur_chain = chain;
3191
3192                 rx_ring = rx_ring->next;
3193         }
3194
3195         return 0;
3196
3197 err_free_chain:
3198         cur_chain = head->next;
3199         while (cur_chain) {
3200                 chain = cur_chain->next;
3201                 devm_kfree(&pdev->dev, cur_chain);
3202                 cur_chain = chain;
3203         }
3204         head->next = NULL;
3205
3206         return -ENOMEM;
3207 }
3208
3209 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3210                                         struct hnae3_ring_chain_node *head)
3211 {
3212         struct pci_dev *pdev = tqp_vector->handle->pdev;
3213         struct hnae3_ring_chain_node *chain_tmp, *chain;
3214
3215         chain = head->next;
3216
3217         while (chain) {
3218                 chain_tmp = chain->next;
3219                 devm_kfree(&pdev->dev, chain);
3220                 chain = chain_tmp;
3221         }
3222 }
3223
3224 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
3225                                    struct hns3_enet_ring *ring)
3226 {
3227         ring->next = group->ring;
3228         group->ring = ring;
3229
3230         group->count++;
3231 }
3232
3233 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
3234 {
3235         struct pci_dev *pdev = priv->ae_handle->pdev;
3236         struct hns3_enet_tqp_vector *tqp_vector;
3237         int num_vectors = priv->vector_num;
3238         int numa_node;
3239         int vector_i;
3240
3241         numa_node = dev_to_node(&pdev->dev);
3242
3243         for (vector_i = 0; vector_i < num_vectors; vector_i++) {
3244                 tqp_vector = &priv->tqp_vector[vector_i];
3245                 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
3246                                 &tqp_vector->affinity_mask);
3247         }
3248 }
3249
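/* hns3_nic_init_vector_data - distribute the TX/RX rings across the
 * allocated TQP vectors in round-robin order, map each ring chain to its
 * vector in hardware and register the per-vector NAPI contexts.
 */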
3250 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
3251 {
3252         struct hnae3_ring_chain_node vector_ring_chain;
3253         struct hnae3_handle *h = priv->ae_handle;
3254         struct hns3_enet_tqp_vector *tqp_vector;
3255         int ret = 0;
3256         int i;
3257
3258         hns3_nic_set_cpumask(priv);
3259
3260         for (i = 0; i < priv->vector_num; i++) {
3261                 tqp_vector = &priv->tqp_vector[i];
3262                 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
3263                 tqp_vector->num_tqps = 0;
3264         }
3265
3266         for (i = 0; i < h->kinfo.num_tqps; i++) {
3267                 u16 vector_i = i % priv->vector_num;
3268                 u16 tqp_num = h->kinfo.num_tqps;
3269
3270                 tqp_vector = &priv->tqp_vector[vector_i];
3271
3272                 hns3_add_ring_to_group(&tqp_vector->tx_group,
3273                                        priv->ring_data[i].ring);
3274
3275                 hns3_add_ring_to_group(&tqp_vector->rx_group,
3276                                        priv->ring_data[i + tqp_num].ring);
3277
3278                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
3279                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
3280                 tqp_vector->num_tqps++;
3281         }
3282
3283         for (i = 0; i < priv->vector_num; i++) {
3284                 tqp_vector = &priv->tqp_vector[i];
3285
3286                 tqp_vector->rx_group.total_bytes = 0;
3287                 tqp_vector->rx_group.total_packets = 0;
3288                 tqp_vector->tx_group.total_bytes = 0;
3289                 tqp_vector->tx_group.total_packets = 0;
3290                 tqp_vector->handle = h;
3291
3292                 ret = hns3_get_vector_ring_chain(tqp_vector,
3293                                                  &vector_ring_chain);
3294                 if (ret)
3295                         goto map_ring_fail;
3296
3297                 ret = h->ae_algo->ops->map_ring_to_vector(h,
3298                         tqp_vector->vector_irq, &vector_ring_chain);
3299
3300                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3301
3302                 if (ret)
3303                         goto map_ring_fail;
3304
3305                 netif_napi_add(priv->netdev, &tqp_vector->napi,
3306                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
3307         }
3308
3309         return 0;
3310
3311 map_ring_fail:
3312         while (i--)
3313                 netif_napi_del(&priv->tqp_vector[i].napi);
3314
3315         return ret;
3316 }
3317
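/* hns3_nic_alloc_vector_data - request interrupt vectors from the AE
 * layer, bounded by the number of online CPUs, the TQP number and
 * HNS3_VECTOR_PF_MAX_NUM, and allocate the per-vector state.
 */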
3318 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
3319 {
3320 #define HNS3_VECTOR_PF_MAX_NUM          64
3321
3322         struct hnae3_handle *h = priv->ae_handle;
3323         struct hns3_enet_tqp_vector *tqp_vector;
3324         struct hnae3_vector_info *vector;
3325         struct pci_dev *pdev = h->pdev;
3326         u16 tqp_num = h->kinfo.num_tqps;
3327         u16 vector_num;
3328         int ret = 0;
3329         u16 i;
3330
3331         /* RSS size, online CPU count and vector_num should be the same */
3332         /* Should consider 2P/4P systems later */
3333         vector_num = min_t(u16, num_online_cpus(), tqp_num);
3334         vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
3335
3336         vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
3337                               GFP_KERNEL);
3338         if (!vector)
3339                 return -ENOMEM;
3340
3341         vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
3342
3343         priv->vector_num = vector_num;
3344         priv->tqp_vector = (struct hns3_enet_tqp_vector *)
3345                 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
3346                              GFP_KERNEL);
3347         if (!priv->tqp_vector) {
3348                 ret = -ENOMEM;
3349                 goto out;
3350         }
3351
3352         for (i = 0; i < priv->vector_num; i++) {
3353                 tqp_vector = &priv->tqp_vector[i];
3354                 tqp_vector->idx = i;
3355                 tqp_vector->mask_addr = vector[i].io_addr;
3356                 tqp_vector->vector_irq = vector[i].vector;
3357                 hns3_vector_gl_rl_init(tqp_vector, priv);
3358         }
3359
3360 out:
3361         devm_kfree(&pdev->dev, vector);
3362         return ret;
3363 }
3364
3365 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
3366 {
3367         group->ring = NULL;
3368         group->count = 0;
3369 }
3370
3371 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
3372 {
3373         struct hnae3_ring_chain_node vector_ring_chain;
3374         struct hnae3_handle *h = priv->ae_handle;
3375         struct hns3_enet_tqp_vector *tqp_vector;
3376         int i;
3377
3378         for (i = 0; i < priv->vector_num; i++) {
3379                 tqp_vector = &priv->tqp_vector[i];
3380
3381                 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
3382                         continue;
3383
3384                 hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
3385
3386                 h->ae_algo->ops->unmap_ring_from_vector(h,
3387                         tqp_vector->vector_irq, &vector_ring_chain);
3388
3389                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3390
3391                 if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
3392                         irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
3393                         free_irq(tqp_vector->vector_irq, tqp_vector);
3394                         tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
3395                 }
3396
3397                 hns3_clear_ring_group(&tqp_vector->rx_group);
3398                 hns3_clear_ring_group(&tqp_vector->tx_group);
3399                 netif_napi_del(&priv->tqp_vector[i].napi);
3400         }
3401 }
3402
3403 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
3404 {
3405         struct hnae3_handle *h = priv->ae_handle;
3406         struct pci_dev *pdev = h->pdev;
3407         int i, ret;
3408
3409         for (i = 0; i < priv->vector_num; i++) {
3410                 struct hns3_enet_tqp_vector *tqp_vector;
3411
3412                 tqp_vector = &priv->tqp_vector[i];
3413                 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
3414                 if (ret)
3415                         return ret;
3416         }
3417
3418         devm_kfree(&pdev->dev, priv->tqp_vector);
3419         return 0;
3420 }
3421
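/* hns3_ring_get_cfg - allocate and initialize the software state of one
 * TX or RX ring for queue @q. TX rings occupy ring_data indexes
 * [0, num_tqps) and RX rings [num_tqps, 2 * num_tqps).
 */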
3422 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
3423                              int ring_type)
3424 {
3425         struct hns3_nic_ring_data *ring_data = priv->ring_data;
3426         int queue_num = priv->ae_handle->kinfo.num_tqps;
3427         struct pci_dev *pdev = priv->ae_handle->pdev;
3428         struct hns3_enet_ring *ring;
3429         int desc_num;
3430
3431         ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3432         if (!ring)
3433                 return -ENOMEM;
3434
3435         if (ring_type == HNAE3_RING_TYPE_TX) {
3436                 desc_num = priv->ae_handle->kinfo.num_tx_desc;
3437                 ring_data[q->tqp_index].ring = ring;
3438                 ring_data[q->tqp_index].queue_index = q->tqp_index;
3439                 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3440         } else {
3441                 desc_num = priv->ae_handle->kinfo.num_rx_desc;
3442                 ring_data[q->tqp_index + queue_num].ring = ring;
3443                 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
3444                 ring->io_base = q->io_base;
3445         }
3446
3447         hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
3448
3449         ring->tqp = q;
3450         ring->desc = NULL;
3451         ring->desc_cb = NULL;
3452         ring->dev = priv->dev;
3453         ring->desc_dma_addr = 0;
3454         ring->buf_size = q->buf_size;
3455         ring->desc_num = desc_num;
3456         ring->next_to_use = 0;
3457         ring->next_to_clean = 0;
3458
3459         return 0;
3460 }
3461
3462 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3463                               struct hns3_nic_priv *priv)
3464 {
3465         int ret;
3466
3467         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3468         if (ret)
3469                 return ret;
3470
3471         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3472         if (ret) {
3473                 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3474                 return ret;
3475         }
3476
3477         return 0;
3478 }
3479
3480 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3481 {
3482         struct hnae3_handle *h = priv->ae_handle;
3483         struct pci_dev *pdev = h->pdev;
3484         int i, ret;
3485
3486         priv->ring_data =  devm_kzalloc(&pdev->dev,
3487                                         array3_size(h->kinfo.num_tqps,
3488                                                     sizeof(*priv->ring_data),
3489                                                     2),
3490                                         GFP_KERNEL);
3491         if (!priv->ring_data)
3492                 return -ENOMEM;
3493
3494         for (i = 0; i < h->kinfo.num_tqps; i++) {
3495                 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3496                 if (ret)
3497                         goto err;
3498         }
3499
3500         return 0;
3501 err:
3502         while (i--) {
3503                 devm_kfree(priv->dev, priv->ring_data[i].ring);
3504                 devm_kfree(priv->dev,
3505                            priv->ring_data[i + h->kinfo.num_tqps].ring);
3506         }
3507
3508         devm_kfree(&pdev->dev, priv->ring_data);
3509         priv->ring_data = NULL;
3510         return ret;
3511 }
3512
3513 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3514 {
3515         struct hnae3_handle *h = priv->ae_handle;
3516         int i;
3517
3518         if (!priv->ring_data)
3519                 return;
3520
3521         for (i = 0; i < h->kinfo.num_tqps; i++) {
3522                 devm_kfree(priv->dev, priv->ring_data[i].ring);
3523                 devm_kfree(priv->dev,
3524                            priv->ring_data[i + h->kinfo.num_tqps].ring);
3525         }
3526         devm_kfree(priv->dev, priv->ring_data);
3527         priv->ring_data = NULL;
3528 }
3529
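/* hns3_alloc_ring_memory - allocate the descriptor control blocks, the
 * descriptor array and, for RX rings, the receive buffers of @ring.
 */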
3530 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3531 {
3532         int ret;
3533
3534         if (ring->desc_num <= 0 || ring->buf_size <= 0)
3535                 return -EINVAL;
3536
3537         ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
3538                                      sizeof(ring->desc_cb[0]), GFP_KERNEL);
3539         if (!ring->desc_cb) {
3540                 ret = -ENOMEM;
3541                 goto out;
3542         }
3543
3544         ret = hns3_alloc_desc(ring);
3545         if (ret)
3546                 goto out_with_desc_cb;
3547
3548         if (!HNAE3_IS_TX_RING(ring)) {
3549                 ret = hns3_alloc_ring_buffers(ring);
3550                 if (ret)
3551                         goto out_with_desc;
3552         }
3553
3554         return 0;
3555
3556 out_with_desc:
3557         hns3_free_desc(ring);
3558 out_with_desc_cb:
3559         devm_kfree(ring_to_dev(ring), ring->desc_cb);
3560         ring->desc_cb = NULL;
3561 out:
3562         return ret;
3563 }
3564
3565 static void hns3_fini_ring(struct hns3_enet_ring *ring)
3566 {
3567         hns3_free_desc(ring);
3568         devm_kfree(ring_to_dev(ring), ring->desc_cb);
3569         ring->desc_cb = NULL;
3570         ring->next_to_clean = 0;
3571         ring->next_to_use = 0;
3572         ring->pending_buf = 0;
3573         if (ring->skb) {
3574                 dev_kfree_skb_any(ring->skb);
3575                 ring->skb = NULL;
3576         }
3577 }
3578
3579 static int hns3_buf_size2type(u32 buf_size)
3580 {
3581         int bd_size_type;
3582
3583         switch (buf_size) {
3584         case 512:
3585                 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3586                 break;
3587         case 1024:
3588                 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3589                 break;
3590         case 2048:
3591                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3592                 break;
3593         case 4096:
3594                 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3595                 break;
3596         default:
3597                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3598         }
3599
3600         return bd_size_type;
3601 }
3602
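/* hns3_init_ring_hw - write the descriptor base address and BD number of
 * @ring (and, for RX rings, the buffer size) to the TQP ring registers.
 */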
3603 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3604 {
3605         dma_addr_t dma = ring->desc_dma_addr;
3606         struct hnae3_queue *q = ring->tqp;
3607
3608         if (!HNAE3_IS_TX_RING(ring)) {
3609                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3610                                (u32)dma);
3611                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3612                                (u32)((dma >> 31) >> 1));
3613
3614                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3615                                hns3_buf_size2type(ring->buf_size));
3616                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3617                                ring->desc_num / 8 - 1);
3618
3619         } else {
3620                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3621                                (u32)dma);
3622                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3623                                (u32)((dma >> 31) >> 1));
3624
3625                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3626                                ring->desc_num / 8 - 1);
3627         }
3628 }
3629
3630 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3631 {
3632         struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3633         int i;
3634
3635         for (i = 0; i < HNAE3_MAX_TC; i++) {
3636                 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3637                 int j;
3638
3639                 if (!tc_info->enable)
3640                         continue;
3641
3642                 for (j = 0; j < tc_info->tqp_count; j++) {
3643                         struct hnae3_queue *q;
3644
3645                         q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3646                         hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3647                                        tc_info->tc);
3648                 }
3649         }
3650 }
3651
3652 int hns3_init_all_ring(struct hns3_nic_priv *priv)
3653 {
3654         struct hnae3_handle *h = priv->ae_handle;
3655         int ring_num = h->kinfo.num_tqps * 2;
3656         int i, j;
3657         int ret;
3658
3659         for (i = 0; i < ring_num; i++) {
3660                 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3661                 if (ret) {
3662                         dev_err(priv->dev,
3663                                 "Alloc ring memory fail! ret=%d\n", ret);
3664                         goto out_when_alloc_ring_memory;
3665                 }
3666
3667                 u64_stats_init(&priv->ring_data[i].ring->syncp);
3668         }
3669
3670         return 0;
3671
3672 out_when_alloc_ring_memory:
3673         for (j = i - 1; j >= 0; j--)
3674                 hns3_fini_ring(priv->ring_data[j].ring);
3675
3676         return -ENOMEM;
3677 }
3678
3679 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3680 {
3681         struct hnae3_handle *h = priv->ae_handle;
3682         int i;
3683
3684         for (i = 0; i < h->kinfo.num_tqps; i++) {
3685                 hns3_fini_ring(priv->ring_data[i].ring);
3686                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3687         }
3688         return 0;
3689 }
3690
3691 /* Set the MAC address if it is configured, or leave it to the AE driver */
3692 static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3693 {
3694         struct hns3_nic_priv *priv = netdev_priv(netdev);
3695         struct hnae3_handle *h = priv->ae_handle;
3696         u8 mac_addr_temp[ETH_ALEN];
3697         int ret = 0;
3698
3699         if (h->ae_algo->ops->get_mac_addr && init) {
3700                 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3701                 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3702         }
3703
3704         /* Check if the MAC address is valid; if not, get a random one */
3705         if (!is_valid_ether_addr(netdev->dev_addr)) {
3706                 eth_hw_addr_random(netdev);
3707                 dev_warn(priv->dev, "using random MAC address %pM\n",
3708                          netdev->dev_addr);
3709         }
3710
3711         if (h->ae_algo->ops->set_mac_addr)
3712                 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3713
3714         return ret;
3715 }
3716
3717 static int hns3_init_phy(struct net_device *netdev)
3718 {
3719         struct hnae3_handle *h = hns3_get_handle(netdev);
3720         int ret = 0;
3721
3722         if (h->ae_algo->ops->mac_connect_phy)
3723                 ret = h->ae_algo->ops->mac_connect_phy(h);
3724
3725         return ret;
3726 }
3727
3728 static void hns3_uninit_phy(struct net_device *netdev)
3729 {
3730         struct hnae3_handle *h = hns3_get_handle(netdev);
3731
3732         if (h->ae_algo->ops->mac_disconnect_phy)
3733                 h->ae_algo->ops->mac_disconnect_phy(h);
3734 }
3735
3736 static int hns3_restore_fd_rules(struct net_device *netdev)
3737 {
3738         struct hnae3_handle *h = hns3_get_handle(netdev);
3739         int ret = 0;
3740
3741         if (h->ae_algo->ops->restore_fd_rules)
3742                 ret = h->ae_algo->ops->restore_fd_rules(h);
3743
3744         return ret;
3745 }
3746
3747 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3748 {
3749         struct hnae3_handle *h = hns3_get_handle(netdev);
3750
3751         if (h->ae_algo->ops->del_all_fd_entries)
3752                 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3753 }
3754
3755 static int hns3_client_start(struct hnae3_handle *handle)
3756 {
3757         if (!handle->ae_algo->ops->client_start)
3758                 return 0;
3759
3760         return handle->ae_algo->ops->client_start(handle);
3761 }
3762
3763 static void hns3_client_stop(struct hnae3_handle *handle)
3764 {
3765         if (!handle->ae_algo->ops->client_stop)
3766                 return;
3767
3768         handle->ae_algo->ops->client_stop(handle);
3769 }
3770
3771 static void hns3_info_show(struct hns3_nic_priv *priv)
3772 {
3773         struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3774
3775         dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
3776         dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
3777         dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
3778         dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
3779         dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
3780         dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
3781         dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
3782         dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
3783         dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
3784 }
3785
3786 static int hns3_client_init(struct hnae3_handle *handle)
3787 {
3788         struct pci_dev *pdev = handle->pdev;
3789         u16 alloc_tqps, max_rss_size;
3790         struct hns3_nic_priv *priv;
3791         struct net_device *netdev;
3792         int ret;
3793
3794         handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3795                                                     &max_rss_size);
3796         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3797         if (!netdev)
3798                 return -ENOMEM;
3799
3800         priv = netdev_priv(netdev);
3801         priv->dev = &pdev->dev;
3802         priv->netdev = netdev;
3803         priv->ae_handle = handle;
3804         priv->tx_timeout_count = 0;
3805         set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
3806
3807         handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
3808
3809         handle->kinfo.netdev = netdev;
3810         handle->priv = (void *)priv;
3811
3812         hns3_init_mac_addr(netdev, true);
3813
3814         hns3_set_default_feature(netdev);
3815
3816         netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3817         netdev->priv_flags |= IFF_UNICAST_FLT;
3818         netdev->netdev_ops = &hns3_nic_netdev_ops;
3819         SET_NETDEV_DEV(netdev, &pdev->dev);
3820         hns3_ethtool_set_ops(netdev);
3821
3822         /* Carrier off reporting is important to ethtool even BEFORE open */
3823         netif_carrier_off(netdev);
3824
3825         ret = hns3_get_ring_config(priv);
3826         if (ret) {
3827                 ret = -ENOMEM;
3828                 goto out_get_ring_cfg;
3829         }
3830
3831         ret = hns3_nic_alloc_vector_data(priv);
3832         if (ret) {
3833                 ret = -ENOMEM;
3834                 goto out_alloc_vector_data;
3835         }
3836
3837         ret = hns3_nic_init_vector_data(priv);
3838         if (ret) {
3839                 ret = -ENOMEM;
3840                 goto out_init_vector_data;
3841         }
3842
3843         ret = hns3_init_all_ring(priv);
3844         if (ret) {
3845                 ret = -ENOMEM;
3846                 goto out_init_ring_data;
3847         }
3848
3849         ret = hns3_init_phy(netdev);
3850         if (ret)
3851                 goto out_init_phy;
3852
3853         ret = register_netdev(netdev);
3854         if (ret) {
3855                 dev_err(priv->dev, "probe register netdev fail!\n");
3856                 goto out_reg_netdev_fail;
3857         }
3858
3859         ret = hns3_client_start(handle);
3860         if (ret) {
3861                 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
3862                 goto out_client_start;
3863         }
3864
3865         hns3_dcbnl_setup(handle);
3866
3867         hns3_dbg_init(handle);
3868
3869         /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
3870         netdev->max_mtu = HNS3_MAX_MTU;
3871
3872         set_bit(HNS3_NIC_STATE_INITED, &priv->state);
3873
3874         if (netif_msg_drv(handle))
3875                 hns3_info_show(priv);
3876
3877         return ret;
3878
3879 out_client_start:
3880         unregister_netdev(netdev);
3881 out_reg_netdev_fail:
3882         hns3_uninit_phy(netdev);
3883 out_init_phy:
3884         hns3_uninit_all_ring(priv);
3885 out_init_ring_data:
3886         hns3_nic_uninit_vector_data(priv);
3887 out_init_vector_data:
3888         hns3_nic_dealloc_vector_data(priv);
3889 out_alloc_vector_data:
3890         priv->ring_data = NULL;
3891 out_get_ring_cfg:
3892         priv->ae_handle = NULL;
3893         free_netdev(netdev);
3894         return ret;
3895 }
3896
3897 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3898 {
3899         struct net_device *netdev = handle->kinfo.netdev;
3900         struct hns3_nic_priv *priv = netdev_priv(netdev);
3901         int ret;
3902
3903         hns3_remove_hw_addr(netdev);
3904
3905         if (netdev->reg_state != NETREG_UNINITIALIZED)
3906                 unregister_netdev(netdev);
3907
3908         hns3_client_stop(handle);
3909
3910         hns3_uninit_phy(netdev);
3911
3912         if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
3913                 netdev_warn(netdev, "already uninitialized\n");
3914                 goto out_netdev_free;
3915         }
3916
3917         hns3_del_all_fd_rules(netdev, true);
3918
3919         hns3_force_clear_all_rx_ring(handle);
3920
3921         hns3_nic_uninit_vector_data(priv);
3922
3923         ret = hns3_nic_dealloc_vector_data(priv);
3924         if (ret)
3925                 netdev_err(netdev, "dealloc vector error\n");
3926
3927         ret = hns3_uninit_all_ring(priv);
3928         if (ret)
3929                 netdev_err(netdev, "uninit ring error\n");
3930
3931         hns3_put_ring_config(priv);
3932
3933         hns3_dbg_uninit(handle);
3934
3935 out_netdev_free:
3936         free_netdev(netdev);
3937 }
3938
3939 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3940 {
3941         struct net_device *netdev = handle->kinfo.netdev;
3942
3943         if (!netdev)
3944                 return;
3945
3946         if (linkup) {
3947                 netif_carrier_on(netdev);
3948                 netif_tx_wake_all_queues(netdev);
3949                 if (netif_msg_link(handle))
3950                         netdev_info(netdev, "link up\n");
3951         } else {
3952                 netif_carrier_off(netdev);
3953                 netif_tx_stop_all_queues(netdev);
3954                 if (netif_msg_link(handle))
3955                         netdev_info(netdev, "link down\n");
3956         }
3957 }
3958
3959 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3960 {
3961         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3962         struct net_device *ndev = kinfo->netdev;
3963
3964         if (tc > HNAE3_MAX_TC)
3965                 return -EINVAL;
3966
3967         if (!ndev)
3968                 return -ENODEV;
3969
3970         return hns3_nic_set_real_num_queue(ndev);
3971 }
3972
3973 static int hns3_recover_hw_addr(struct net_device *ndev)
3974 {
3975         struct netdev_hw_addr_list *list;
3976         struct netdev_hw_addr *ha, *tmp;
3977         int ret = 0;
3978
3979         netif_addr_lock_bh(ndev);
3980         /* go through and sync uc_addr entries to the device */
3981         list = &ndev->uc;
3982         list_for_each_entry_safe(ha, tmp, &list->list, list) {
3983                 ret = hns3_nic_uc_sync(ndev, ha->addr);
3984                 if (ret)
3985                         goto out;
3986         }
3987
3988         /* go through and sync mc_addr entries to the device */
3989         list = &ndev->mc;
3990         list_for_each_entry_safe(ha, tmp, &list->list, list) {
3991                 ret = hns3_nic_mc_sync(ndev, ha->addr);
3992                 if (ret)
3993                         goto out;
3994         }
3995
3996 out:
3997         netif_addr_unlock_bh(ndev);
3998         return ret;
3999 }
4000
4001 static void hns3_remove_hw_addr(struct net_device *netdev)
4002 {
4003         struct netdev_hw_addr_list *list;
4004         struct netdev_hw_addr *ha, *tmp;
4005
4006         hns3_nic_uc_unsync(netdev, netdev->dev_addr);
4007
4008         netif_addr_lock_bh(netdev);
4009         /* go through and unsync uc_addr entries from the device */
4010         list = &netdev->uc;
4011         list_for_each_entry_safe(ha, tmp, &list->list, list)
4012                 hns3_nic_uc_unsync(netdev, ha->addr);
4013
4014         /* go through and unsync mc_addr entries from the device */
4015         list = &netdev->mc;
4016         list_for_each_entry_safe(ha, tmp, &list->list, list)
4017                 if (ha->refcount > 1)
4018                         hns3_nic_mc_unsync(netdev, ha->addr);
4019
4020         netif_addr_unlock_bh(netdev);
4021 }
4022
4023 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
4024 {
4025         while (ring->next_to_clean != ring->next_to_use) {
4026                 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
4027                 hns3_free_buffer_detach(ring, ring->next_to_clean);
4028                 ring_ptr_move_fw(ring, next_to_clean);
4029         }
4030 }
4031
4032 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
4033 {
4034         struct hns3_desc_cb res_cbs;
4035         int ret;
4036
4037         while (ring->next_to_use != ring->next_to_clean) {
4038                 /* When a buffer is not reused, its memory has been
4039                  * freed in hns3_handle_rx_bd or will be freed by the
4040                  * stack, so we need to replace the buffer here.
4041                  */
4042                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4043                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
4044                         if (ret) {
4045                                 u64_stats_update_begin(&ring->syncp);
4046                                 ring->stats.sw_err_cnt++;
4047                                 u64_stats_update_end(&ring->syncp);
4048                                 /* if allocating a new buffer fails, exit
4049                                  * directly and clear again in the up flow.
4050                                  */
4051                                 netdev_warn(ring->tqp->handle->kinfo.netdev,
4052                                             "reserve buffer map failed, ret = %d\n",
4053                                             ret);
4054                                 return ret;
4055                         }
4056                         hns3_replace_buffer(ring, ring->next_to_use,
4057                                             &res_cbs);
4058                 }
4059                 ring_ptr_move_fw(ring, next_to_use);
4060         }
4061
4062         /* Free the pending skb in rx ring */
4063         if (ring->skb) {
4064                 dev_kfree_skb_any(ring->skb);
4065                 ring->skb = NULL;
4066                 ring->pending_buf = 0;
4067         }
4068
4069         return 0;
4070 }
4071
4072 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4073 {
4074         while (ring->next_to_use != ring->next_to_clean) {
4075                 /* When a buffer is not reused, its memory has been
4076                  * freed in hns3_handle_rx_bd or will be freed by the
4077                  * stack, so we only need to unmap the buffer here.
4078                  */
4079                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4080                         hns3_unmap_buffer(ring,
4081                                           &ring->desc_cb[ring->next_to_use]);
4082                         ring->desc_cb[ring->next_to_use].dma = 0;
4083                 }
4084
4085                 ring_ptr_move_fw(ring, next_to_use);
4086         }
4087 }
4088
4089 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
4090 {
4091         struct net_device *ndev = h->kinfo.netdev;
4092         struct hns3_nic_priv *priv = netdev_priv(ndev);
4093         struct hns3_enet_ring *ring;
4094         u32 i;
4095
4096         for (i = 0; i < h->kinfo.num_tqps; i++) {
4097                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4098                 hns3_force_clear_rx_ring(ring);
4099         }
4100 }
4101
4102 static void hns3_clear_all_ring(struct hnae3_handle *h)
4103 {
4104         struct net_device *ndev = h->kinfo.netdev;
4105         struct hns3_nic_priv *priv = netdev_priv(ndev);
4106         u32 i;
4107
4108         for (i = 0; i < h->kinfo.num_tqps; i++) {
4109                 struct netdev_queue *dev_queue;
4110                 struct hns3_enet_ring *ring;
4111
4112                 ring = priv->ring_data[i].ring;
4113                 hns3_clear_tx_ring(ring);
4114                 dev_queue = netdev_get_tx_queue(ndev,
4115                                                 priv->ring_data[i].queue_index);
4116                 netdev_tx_reset_queue(dev_queue);
4117
4118                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4119                 /* Continue to clear other rings even if clearing some
4120                  * rings failed.
4121                  */
4122                 hns3_clear_rx_ring(ring);
4123         }
4124 }
4125
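/* hns3_nic_reset_all_ring - reset every TQP, re-initialize the TX/RX ring
 * registers, drain the TX rings and re-post all RX buffers so the rings
 * start from a clean state after a reset.
 */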
4126 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4127 {
4128         struct net_device *ndev = h->kinfo.netdev;
4129         struct hns3_nic_priv *priv = netdev_priv(ndev);
4130         struct hns3_enet_ring *rx_ring;
4131         int i, j;
4132         int ret;
4133
4134         for (i = 0; i < h->kinfo.num_tqps; i++) {
4135                 ret = h->ae_algo->ops->reset_queue(h, i);
4136                 if (ret)
4137                         return ret;
4138
4139                 hns3_init_ring_hw(priv->ring_data[i].ring);
4140
4141                 /* We need to clear the tx ring here because the self test
4142                  * uses the ring and does not run down before up
4143                  */
4144                 hns3_clear_tx_ring(priv->ring_data[i].ring);
4145                 priv->ring_data[i].ring->next_to_clean = 0;
4146                 priv->ring_data[i].ring->next_to_use = 0;
4147
4148                 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4149                 hns3_init_ring_hw(rx_ring);
4150                 ret = hns3_clear_rx_ring(rx_ring);
4151                 if (ret)
4152                         return ret;
4153
4154                 /* We cannot know the hardware head and tail when this
4155                  * function is called in the reset flow, so reuse all desc.
4156                  */
4157                 for (j = 0; j < rx_ring->desc_num; j++)
4158                         hns3_reuse_buffer(rx_ring, j);
4159
4160                 rx_ring->next_to_clean = 0;
4161                 rx_ring->next_to_use = 0;
4162         }
4163
4164         hns3_init_tx_ring_tc(priv);
4165
4166         return 0;
4167 }
4168
4169 static void hns3_store_coal(struct hns3_nic_priv *priv)
4170 {
4171         /* ethtool only supports setting and querying one coalesce
4172          * configuration for now, so save vector 0's coalesce
4173          * configuration here in order to restore it later.
4174          */
4175         memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
4176                sizeof(struct hns3_enet_coalesce));
4177         memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
4178                sizeof(struct hns3_enet_coalesce));
4179 }
4180
4181 static void hns3_restore_coal(struct hns3_nic_priv *priv)
4182 {
4183         u16 vector_num = priv->vector_num;
4184         int i;
4185
4186         for (i = 0; i < vector_num; i++) {
4187                 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
4188                        sizeof(struct hns3_enet_coalesce));
4189                 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
4190                        sizeof(struct hns3_enet_coalesce));
4191         }
4192 }
4193
4194 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
4195 {
4196         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4197         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4198         struct net_device *ndev = kinfo->netdev;
4199         struct hns3_nic_priv *priv = netdev_priv(ndev);
4200
4201         if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
4202                 return 0;
4203
4204         /* It is cumbersome for the hardware to pick-and-choose entries for
4205          * deletion from the table space. Hence, for a function reset, software
4206          * intervention is required to delete the entries.
4207          */
4208         if (hns3_dev_ongoing_func_reset(ae_dev)) {
4209                 hns3_remove_hw_addr(ndev);
4210                 hns3_del_all_fd_rules(ndev, false);
4211         }
4212
4213         if (!netif_running(ndev))
4214                 return 0;
4215
4216         return hns3_nic_net_stop(ndev);
4217 }
4218
4219 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
4220 {
4221         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4222         struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
4223         int ret = 0;
4224
4225         clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4226
4227         if (netif_running(kinfo->netdev)) {
4228                 ret = hns3_nic_net_open(kinfo->netdev);
4229                 if (ret) {
4230                         set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4231                         netdev_err(kinfo->netdev,
4232                                    "hns net up fail, ret=%d!\n", ret);
4233                         return ret;
4234                 }
4235         }
4236
4237         return ret;
4238 }
4239
4240 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
4241 {
4242         struct net_device *netdev = handle->kinfo.netdev;
4243         struct hns3_nic_priv *priv = netdev_priv(netdev);
4244         int ret;
4245
4246         /* Carrier off reporting is important to ethtool even BEFORE open */
4247         netif_carrier_off(netdev);
4248
4249         ret = hns3_get_ring_config(priv);
4250         if (ret)
4251                 return ret;
4252
4253         ret = hns3_nic_alloc_vector_data(priv);
4254         if (ret)
4255                 goto err_put_ring;
4256
4257         hns3_restore_coal(priv);
4258
4259         ret = hns3_nic_init_vector_data(priv);
4260         if (ret)
4261                 goto err_dealloc_vector;
4262
4263         ret = hns3_init_all_ring(priv);
4264         if (ret)
4265                 goto err_uninit_vector;
4266
4267         ret = hns3_client_start(handle);
4268         if (ret) {
4269                 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
4270                 goto err_uninit_ring;
4271         }
4272
4273         set_bit(HNS3_NIC_STATE_INITED, &priv->state);
4274
4275         return ret;
4276
4277 err_uninit_ring:
4278         hns3_uninit_all_ring(priv);
4279 err_uninit_vector:
4280         hns3_nic_uninit_vector_data(priv);
4281 err_dealloc_vector:
4282         hns3_nic_dealloc_vector_data(priv);
4283 err_put_ring:
4284         hns3_put_ring_config(priv);
4285
4286         return ret;
4287 }
4288
4289 static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
4290 {
4291         struct net_device *netdev = handle->kinfo.netdev;
4292         bool vlan_filter_enable;
4293         int ret;
4294
4295         ret = hns3_init_mac_addr(netdev, false);
4296         if (ret)
4297                 return ret;
4298
4299         ret = hns3_recover_hw_addr(netdev);
4300         if (ret)
4301                 return ret;
4302
4303         ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
4304         if (ret)
4305                 return ret;
4306
4307         vlan_filter_enable = !(netdev->flags & IFF_PROMISC);
4308         hns3_enable_vlan_filter(netdev, vlan_filter_enable);
4309
4310         if (handle->ae_algo->ops->restore_vlan_table)
4311                 handle->ae_algo->ops->restore_vlan_table(handle);
4312
4313         return hns3_restore_fd_rules(netdev);
4314 }
4315
4316 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
4317 {
4318         struct net_device *netdev = handle->kinfo.netdev;
4319         struct hns3_nic_priv *priv = netdev_priv(netdev);
4320         int ret;
4321
4322         if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
4323                 netdev_warn(netdev, "already uninitialized\n");
4324                 return 0;
4325         }
4326
4327         hns3_force_clear_all_rx_ring(handle);
4328
4329         hns3_nic_uninit_vector_data(priv);
4330
4331         hns3_store_coal(priv);
4332
4333         ret = hns3_nic_dealloc_vector_data(priv);
4334         if (ret)
4335                 netdev_err(netdev, "dealloc vector error\n");
4336
4337         ret = hns3_uninit_all_ring(priv);
4338         if (ret)
4339                 netdev_err(netdev, "uninit ring error\n");
4340
4341         hns3_put_ring_config(priv);
4342
4343         return ret;
4344 }
4345
4346 static int hns3_reset_notify(struct hnae3_handle *handle,
4347                              enum hnae3_reset_notify_type type)
4348 {
4349         int ret = 0;
4350
4351         switch (type) {
4352         case HNAE3_UP_CLIENT:
4353                 ret = hns3_reset_notify_up_enet(handle);
4354                 break;
4355         case HNAE3_DOWN_CLIENT:
4356                 ret = hns3_reset_notify_down_enet(handle);
4357                 break;
4358         case HNAE3_INIT_CLIENT:
4359                 ret = hns3_reset_notify_init_enet(handle);
4360                 break;
4361         case HNAE3_UNINIT_CLIENT:
4362                 ret = hns3_reset_notify_uninit_enet(handle);
4363                 break;
4364         case HNAE3_RESTORE_CLIENT:
4365                 ret = hns3_reset_notify_restore_enet(handle);
4366                 break;
4367         default:
4368                 break;
4369         }
4370
4371         return ret;
4372 }
4373
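/* hns3_set_channels - change the number of TQPs (combined channels) via
 * ethtool. The device is brought down and uninitialized, the AE layer is
 * asked to switch the channel count (reverting on failure), and the
 * device is then re-initialized and brought back up.
 */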
4374 int hns3_set_channels(struct net_device *netdev,
4375                       struct ethtool_channels *ch)
4376 {
4377         struct hnae3_handle *h = hns3_get_handle(netdev);
4378         struct hnae3_knic_private_info *kinfo = &h->kinfo;
4379         bool rxfh_configured = netif_is_rxfh_configured(netdev);
4380         u32 new_tqp_num = ch->combined_count;
4381         u16 org_tqp_num;
4382         int ret;
4383
4384         if (ch->rx_count || ch->tx_count)
4385                 return -EINVAL;
4386
4387         if (new_tqp_num > hns3_get_max_available_channels(h) ||
4388             new_tqp_num < 1) {
4389                 dev_err(&netdev->dev,
4390                         "Change tqps fail, the tqp range is from 1 to %d",
4391                         hns3_get_max_available_channels(h));
4392                 return -EINVAL;
4393         }
4394
4395         if (kinfo->rss_size == new_tqp_num)
4396                 return 0;
4397
4398         ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
4399         if (ret)
4400                 return ret;
4401
4402         ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
4403         if (ret)
4404                 return ret;
4405
4406         org_tqp_num = h->kinfo.num_tqps;
4407         ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
4408         if (ret) {
4409                 ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
4410                                                     rxfh_configured);
4411                 if (ret) {
4412                         /* Reverting to the old tqp num failed: fatal error */
4413                         dev_err(&netdev->dev,
4414                                 "Revert to old tqp num fail, ret=%d", ret);
4415                         return ret;
4416                 }
4417                 dev_info(&netdev->dev,
4418                          "Change tqp num fail, Revert to old tqp num");
4419         }
4420         ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
4421         if (ret)
4422                 return ret;
4423
4424         return hns3_reset_notify(h, HNAE3_UP_CLIENT);
4425 }
4426
4427 static const struct hnae3_client_ops client_ops = {
4428         .init_instance = hns3_client_init,
4429         .uninit_instance = hns3_client_uninit,
4430         .link_status_change = hns3_link_status_change,
4431         .setup_tc = hns3_client_setup_tc,
4432         .reset_notify = hns3_reset_notify,
4433 };
4434
4435 /* hns3_init_module - Driver registration routine
4436  * hns3_init_module is the first routine called when the driver is
4437  * loaded. It registers the hnae3 client and the PCI driver.
4438  */
4439 static int __init hns3_init_module(void)
4440 {
4441         int ret;
4442
4443         pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
4444         pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
4445
4446         client.type = HNAE3_CLIENT_KNIC;
4447         snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
4448                  hns3_driver_name);
4449
4450         client.ops = &client_ops;
4451
4452         INIT_LIST_HEAD(&client.node);
4453
4454         hns3_dbg_register_debugfs(hns3_driver_name);
4455
4456         ret = hnae3_register_client(&client);
4457         if (ret)
4458                 goto err_reg_client;
4459
4460         ret = pci_register_driver(&hns3_driver);
4461         if (ret)
4462                 goto err_reg_driver;
4463
4464         return ret;
4465
4466 err_reg_driver:
4467         hnae3_unregister_client(&client);
4468 err_reg_client:
4469         hns3_dbg_unregister_debugfs();
4470         return ret;
4471 }
4472 module_init(hns3_init_module);
4473
4474 /* hns3_exit_module - Driver exit cleanup routine
4475  * hns3_exit_module is called just before the driver is removed
4476  * from memory.
4477  */
4478 static void __exit hns3_exit_module(void)
4479 {
4480         pci_unregister_driver(&hns3_driver);
4481         hnae3_unregister_client(&client);
4482         hns3_dbg_unregister_debugfs();
4483 }
4484 module_exit(hns3_exit_module);
4485
4486 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
4487 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4488 MODULE_LICENSE("GPL");
4489 MODULE_ALIAS("pci:hns-nic");
4490 MODULE_VERSION(HNS3_MOD_VERSION);