net: hns3: add interrupt affinity support for misc interrupt
[platform/kernel/linux-rpi.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
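/* Helpers for table-driven statistics: HCLGE_MAC_STATS_FIELD_OFF() records the
 * byte offset of a counter inside struct hclge_mac_stats, and HCLGE_STATS_READ()
 * reads the u64 counter stored at such an offset (see g_mac_stats_string and
 * hclge_comm_get_stats() below).
 */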
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38
39 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
40 static int hclge_init_vlan_config(struct hclge_dev *hdev);
41 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
42 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
43 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
44 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
45                                u16 *allocated_size, bool is_alloc);
46 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
47 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
48 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
49                                                    unsigned long *addr);
50
51 static struct hnae3_ae_algo ae_algo;
52
53 static const struct pci_device_id ae_algo_pci_tbl[] = {
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
55         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
56         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
57         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
58         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
59         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
60         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
61         /* required last entry */
62         {0, }
63 };
64
65 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
66
67 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
68                                          HCLGE_CMDQ_TX_ADDR_H_REG,
69                                          HCLGE_CMDQ_TX_DEPTH_REG,
70                                          HCLGE_CMDQ_TX_TAIL_REG,
71                                          HCLGE_CMDQ_TX_HEAD_REG,
72                                          HCLGE_CMDQ_RX_ADDR_L_REG,
73                                          HCLGE_CMDQ_RX_ADDR_H_REG,
74                                          HCLGE_CMDQ_RX_DEPTH_REG,
75                                          HCLGE_CMDQ_RX_TAIL_REG,
76                                          HCLGE_CMDQ_RX_HEAD_REG,
77                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
78                                          HCLGE_CMDQ_INTR_STS_REG,
79                                          HCLGE_CMDQ_INTR_EN_REG,
80                                          HCLGE_CMDQ_INTR_GEN_REG};
81
82 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
83                                            HCLGE_VECTOR0_OTER_EN_REG,
84                                            HCLGE_MISC_RESET_STS_REG,
85                                            HCLGE_MISC_VECTOR_INT_STS,
86                                            HCLGE_GLOBAL_RESET_REG,
87                                            HCLGE_FUN_RST_ING,
88                                            HCLGE_GRO_EN_REG};
89
90 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
91                                          HCLGE_RING_RX_ADDR_H_REG,
92                                          HCLGE_RING_RX_BD_NUM_REG,
93                                          HCLGE_RING_RX_BD_LENGTH_REG,
94                                          HCLGE_RING_RX_MERGE_EN_REG,
95                                          HCLGE_RING_RX_TAIL_REG,
96                                          HCLGE_RING_RX_HEAD_REG,
97                                          HCLGE_RING_RX_FBD_NUM_REG,
98                                          HCLGE_RING_RX_OFFSET_REG,
99                                          HCLGE_RING_RX_FBD_OFFSET_REG,
100                                          HCLGE_RING_RX_STASH_REG,
101                                          HCLGE_RING_RX_BD_ERR_REG,
102                                          HCLGE_RING_TX_ADDR_L_REG,
103                                          HCLGE_RING_TX_ADDR_H_REG,
104                                          HCLGE_RING_TX_BD_NUM_REG,
105                                          HCLGE_RING_TX_PRIORITY_REG,
106                                          HCLGE_RING_TX_TC_REG,
107                                          HCLGE_RING_TX_MERGE_EN_REG,
108                                          HCLGE_RING_TX_TAIL_REG,
109                                          HCLGE_RING_TX_HEAD_REG,
110                                          HCLGE_RING_TX_FBD_NUM_REG,
111                                          HCLGE_RING_TX_OFFSET_REG,
112                                          HCLGE_RING_TX_EBD_NUM_REG,
113                                          HCLGE_RING_TX_EBD_OFFSET_REG,
114                                          HCLGE_RING_TX_BD_ERR_REG,
115                                          HCLGE_RING_EN_REG};
116
117 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
118                                              HCLGE_TQP_INTR_GL0_REG,
119                                              HCLGE_TQP_INTR_GL1_REG,
120                                              HCLGE_TQP_INTR_GL2_REG,
121                                              HCLGE_TQP_INTR_RL_REG};
122
123 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
124         "App    Loopback test",
125         "Serdes serial Loopback test",
126         "Serdes parallel Loopback test",
127         "Phy    Loopback test"
128 };
129
130 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
131         {"mac_tx_mac_pause_num",
132                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
133         {"mac_rx_mac_pause_num",
134                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
135         {"mac_tx_control_pkt_num",
136                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
137         {"mac_rx_control_pkt_num",
138                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
139         {"mac_tx_pfc_pkt_num",
140                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
141         {"mac_tx_pfc_pri0_pkt_num",
142                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
143         {"mac_tx_pfc_pri1_pkt_num",
144                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
145         {"mac_tx_pfc_pri2_pkt_num",
146                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
147         {"mac_tx_pfc_pri3_pkt_num",
148                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
149         {"mac_tx_pfc_pri4_pkt_num",
150                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
151         {"mac_tx_pfc_pri5_pkt_num",
152                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
153         {"mac_tx_pfc_pri6_pkt_num",
154                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
155         {"mac_tx_pfc_pri7_pkt_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
157         {"mac_rx_pfc_pkt_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
159         {"mac_rx_pfc_pri0_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
161         {"mac_rx_pfc_pri1_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
163         {"mac_rx_pfc_pri2_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
165         {"mac_rx_pfc_pri3_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
167         {"mac_rx_pfc_pri4_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
169         {"mac_rx_pfc_pri5_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
171         {"mac_rx_pfc_pri6_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
173         {"mac_rx_pfc_pri7_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
175         {"mac_tx_total_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
177         {"mac_tx_total_oct_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
179         {"mac_tx_good_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
181         {"mac_tx_bad_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
183         {"mac_tx_good_oct_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
185         {"mac_tx_bad_oct_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
187         {"mac_tx_uni_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
189         {"mac_tx_multi_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
191         {"mac_tx_broad_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
193         {"mac_tx_undersize_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
195         {"mac_tx_oversize_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
197         {"mac_tx_64_oct_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
199         {"mac_tx_65_127_oct_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
201         {"mac_tx_128_255_oct_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
203         {"mac_tx_256_511_oct_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
205         {"mac_tx_512_1023_oct_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
207         {"mac_tx_1024_1518_oct_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
209         {"mac_tx_1519_2047_oct_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
211         {"mac_tx_2048_4095_oct_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
213         {"mac_tx_4096_8191_oct_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
215         {"mac_tx_8192_9216_oct_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
217         {"mac_tx_9217_12287_oct_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
219         {"mac_tx_12288_16383_oct_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
221         {"mac_tx_1519_max_good_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
223         {"mac_tx_1519_max_bad_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
225         {"mac_rx_total_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
227         {"mac_rx_total_oct_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
229         {"mac_rx_good_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
231         {"mac_rx_bad_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
233         {"mac_rx_good_oct_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
235         {"mac_rx_bad_oct_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
237         {"mac_rx_uni_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
239         {"mac_rx_multi_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
241         {"mac_rx_broad_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
243         {"mac_rx_undersize_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
245         {"mac_rx_oversize_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
247         {"mac_rx_64_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
249         {"mac_rx_65_127_oct_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
251         {"mac_rx_128_255_oct_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
253         {"mac_rx_256_511_oct_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
255         {"mac_rx_512_1023_oct_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
257         {"mac_rx_1024_1518_oct_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
259         {"mac_rx_1519_2047_oct_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
261         {"mac_rx_2048_4095_oct_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
263         {"mac_rx_4096_8191_oct_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
265         {"mac_rx_8192_9216_oct_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
267         {"mac_rx_9217_12287_oct_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
269         {"mac_rx_12288_16383_oct_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
271         {"mac_rx_1519_max_good_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
273         {"mac_rx_1519_max_bad_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
275
276         {"mac_tx_fragment_pkt_num",
277                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
278         {"mac_tx_undermin_pkt_num",
279                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
280         {"mac_tx_jabber_pkt_num",
281                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
282         {"mac_tx_err_all_pkt_num",
283                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
284         {"mac_tx_from_app_good_pkt_num",
285                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
286         {"mac_tx_from_app_bad_pkt_num",
287                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
288         {"mac_rx_fragment_pkt_num",
289                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
290         {"mac_rx_undermin_pkt_num",
291                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
292         {"mac_rx_jabber_pkt_num",
293                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
294         {"mac_rx_fcs_err_pkt_num",
295                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
296         {"mac_rx_send_app_good_pkt_num",
297                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
298         {"mac_rx_send_app_bad_pkt_num",
299                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
300 };
301
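/* Management-table entry matching LLDP frames: destination MAC
 * 01:80:c2:00:00:0e, ethertype ETH_P_LLDP, with the VLAN field masked
 * out of the match (HCLGE_MAC_MGR_MASK_VLAN_B).
 */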
302 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
303         {
304                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
305                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
306                 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
307                 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
308                 .i_port_bitmap = 0x1,
309         },
310 };
311
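/* Default 40-byte RSS hash key (the widely used default Toeplitz key). */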
312 static const u8 hclge_hash_key[] = {
313         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
314         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
315         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
316         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
317         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
318 };
319
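/* Fallback MAC statistics read for firmware without the register-number query:
 * a fixed chain of HCLGE_MAC_CMD_NUM descriptors is fetched with
 * HCLGE_OPC_STATS_MAC; the first descriptor also carries the head, so it holds
 * fewer values (HCLGE_RD_FIRST_STATS_NUM) than the rest
 * (HCLGE_RD_OTHER_STATS_NUM), and every value is accumulated into
 * hdev->hw_stats.mac_stats in field order.
 */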
320 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
321 {
322 #define HCLGE_MAC_CMD_NUM 21
323
324         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
325         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
326         __le64 *desc_data;
327         int i, k, n;
328         int ret;
329
330         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
331         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
332         if (ret) {
333                 dev_err(&hdev->pdev->dev,
334                         "Get MAC pkt stats fail, status = %d.\n", ret);
335
336                 return ret;
337         }
338
339         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
340                 /* for special opcode 0032, only the first desc has the head */
341                 if (unlikely(i == 0)) {
342                         desc_data = (__le64 *)(&desc[i].data[0]);
343                         n = HCLGE_RD_FIRST_STATS_NUM;
344                 } else {
345                         desc_data = (__le64 *)(&desc[i]);
346                         n = HCLGE_RD_OTHER_STATS_NUM;
347                 }
348
349                 for (k = 0; k < n; k++) {
350                         *data += le64_to_cpu(*desc_data);
351                         data++;
352                         desc_data++;
353                 }
354         }
355
356         return 0;
357 }
358
359 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
360 {
361         u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
362         struct hclge_desc *desc;
363         __le64 *desc_data;
364         u16 i, k, n;
365         int ret;
366
367         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
368         if (!desc)
369                 return -ENOMEM;
370         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
371         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
372         if (ret) {
373                 kfree(desc);
374                 return ret;
375         }
376
377         for (i = 0; i < desc_num; i++) {
378                 /* for special opcode 0034, only the first desc has the head */
379                 if (i == 0) {
380                         desc_data = (__le64 *)(&desc[i].data[0]);
381                         n = HCLGE_RD_FIRST_STATS_NUM;
382                 } else {
383                         desc_data = (__le64 *)(&desc[i]);
384                         n = HCLGE_RD_OTHER_STATS_NUM;
385                 }
386
387                 for (k = 0; k < n; k++) {
388                         *data += le64_to_cpu(*desc_data);
389                         data++;
390                         desc_data++;
391                 }
392         }
393
394         kfree(desc);
395
396         return 0;
397 }
398
399 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
400 {
401         struct hclge_desc desc;
402         __le32 *desc_data;
403         u32 reg_num;
404         int ret;
405
406         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
407         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
408         if (ret)
409                 return ret;
410
411         desc_data = (__le32 *)(&desc.data[0]);
412         reg_num = le32_to_cpu(*desc_data);
413
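        /* i.e. desc_num = 1 + roundup((reg_num - 3) / 4): one descriptor plus
         * one more for every four remaining register values, rounded up.
         */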
414         *desc_num = 1 + ((reg_num - 3) >> 2) +
415                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
416
417         return 0;
418 }
419
420 static int hclge_mac_update_stats(struct hclge_dev *hdev)
421 {
422         u32 desc_num;
423         int ret;
424
425         ret = hclge_mac_query_reg_num(hdev, &desc_num);
426
427         /* The firmware supports the new statistics acquisition method */
428         if (!ret)
429                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
430         else if (ret == -EOPNOTSUPP)
431                 ret = hclge_mac_update_stats_defective(hdev);
432         else
433                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
434
435         return ret;
436 }
437
438 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
439 {
440         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
441         struct hclge_vport *vport = hclge_get_vport(handle);
442         struct hclge_dev *hdev = vport->back;
443         struct hnae3_queue *queue;
444         struct hclge_desc desc[1];
445         struct hclge_tqp *tqp;
446         int ret, i;
447
448         for (i = 0; i < kinfo->num_tqps; i++) {
449                 queue = handle->kinfo.tqp[i];
450                 tqp = container_of(queue, struct hclge_tqp, q);
451                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
452                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
453                                            true);
454
455                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
456                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
457                 if (ret) {
458                         dev_err(&hdev->pdev->dev,
459                                 "Query tqp stat fail, status = %d,queue = %d\n",
460                                 ret, i);
461                         return ret;
462                 }
463                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
464                         le32_to_cpu(desc[0].data[1]);
465         }
466
467         for (i = 0; i < kinfo->num_tqps; i++) {
468                 queue = handle->kinfo.tqp[i];
469                 tqp = container_of(queue, struct hclge_tqp, q);
470                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
471                 hclge_cmd_setup_basic_desc(&desc[0],
472                                            HCLGE_OPC_QUERY_TX_STATUS,
473                                            true);
474
475                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
476                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
477                 if (ret) {
478                         dev_err(&hdev->pdev->dev,
479                                 "Query tqp stat fail, status = %d,queue = %d\n",
480                                 ret, i);
481                         return ret;
482                 }
483                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
484                         le32_to_cpu(desc[0].data[1]);
485         }
486
487         return 0;
488 }
489
490 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
491 {
492         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
493         struct hclge_tqp *tqp;
494         u64 *buff = data;
495         int i;
496
497         for (i = 0; i < kinfo->num_tqps; i++) {
498                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
499                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
500         }
501
502         for (i = 0; i < kinfo->num_tqps; i++) {
503                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
504                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
505         }
506
507         return buff;
508 }
509
510 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
511 {
512         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
513
514         /* each tqp has both a TX and an RX queue */
515         return kinfo->num_tqps * (2);
516 }
517
518 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
519 {
520         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
521         u8 *buff = data;
522         int i = 0;
523
524         for (i = 0; i < kinfo->num_tqps; i++) {
525                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
526                         struct hclge_tqp, q);
527                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
528                          tqp->index);
529                 buff = buff + ETH_GSTRING_LEN;
530         }
531
532         for (i = 0; i < kinfo->num_tqps; i++) {
533                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
534                         struct hclge_tqp, q);
535                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
536                          tqp->index);
537                 buff = buff + ETH_GSTRING_LEN;
538         }
539
540         return buff;
541 }
542
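/* Copy each counter named in @strs out of @comm_stats, using the byte offsets
 * recorded in the table, into the ethtool data buffer; returns the position
 * just past the last value written.
 */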
543 static u64 *hclge_comm_get_stats(const void *comm_stats,
544                                  const struct hclge_comm_stats_str strs[],
545                                  int size, u64 *data)
546 {
547         u64 *buf = data;
548         u32 i;
549
550         for (i = 0; i < size; i++)
551                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
552
553         return buf + size;
554 }
555
556 static u8 *hclge_comm_get_strings(u32 stringset,
557                                   const struct hclge_comm_stats_str strs[],
558                                   int size, u8 *data)
559 {
560         char *buff = (char *)data;
561         u32 i;
562
563         if (stringset != ETH_SS_STATS)
564                 return buff;
565
566         for (i = 0; i < size; i++) {
567                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
568                 buff = buff + ETH_GSTRING_LEN;
569         }
570
571         return (u8 *)buff;
572 }
573
574 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
575 {
576         struct hnae3_handle *handle;
577         int status;
578
579         handle = &hdev->vport[0].nic;
580         if (handle->client) {
581                 status = hclge_tqps_update_stats(handle);
582                 if (status) {
583                         dev_err(&hdev->pdev->dev,
584                                 "Update TQPS stats fail, status = %d.\n",
585                                 status);
586                 }
587         }
588
589         status = hclge_mac_update_stats(hdev);
590         if (status)
591                 dev_err(&hdev->pdev->dev,
592                         "Update MAC stats fail, status = %d.\n", status);
593 }
594
595 static void hclge_update_stats(struct hnae3_handle *handle,
596                                struct net_device_stats *net_stats)
597 {
598         struct hclge_vport *vport = hclge_get_vport(handle);
599         struct hclge_dev *hdev = vport->back;
600         int status;
601
602         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
603                 return;
604
605         status = hclge_mac_update_stats(hdev);
606         if (status)
607                 dev_err(&hdev->pdev->dev,
608                         "Update MAC stats fail, status = %d.\n",
609                         status);
610
611         status = hclge_tqps_update_stats(handle);
612         if (status)
613                 dev_err(&hdev->pdev->dev,
614                         "Update TQPS stats fail, status = %d.\n",
615                         status);
616
617         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
618 }
619
620 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
621 {
622 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
623                 HNAE3_SUPPORT_PHY_LOOPBACK |\
624                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
625                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
626
627         struct hclge_vport *vport = hclge_get_vport(handle);
628         struct hclge_dev *hdev = vport->back;
629         int count = 0;
630
631         /* Loopback test support rules:
632          * mac: only supported in GE mode
633          * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
634          * phy: only supported when a PHY device exists on the board
635          */
636         if (stringset == ETH_SS_TEST) {
637                 /* clear loopback bit flags first */
638                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
639                 if (hdev->pdev->revision >= 0x21 ||
640                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
641                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
642                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
643                         count += 1;
644                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
645                 }
646
647                 count += 2;
648                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
649                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
650         } else if (stringset == ETH_SS_STATS) {
651                 count = ARRAY_SIZE(g_mac_stats_string) +
652                         hclge_tqps_get_sset_count(handle, stringset);
653         }
654
655         return count;
656 }
657
658 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
659                               u8 *data)
660 {
661         u8 *p = (char *)data;
662         int size;
663
664         if (stringset == ETH_SS_STATS) {
665                 size = ARRAY_SIZE(g_mac_stats_string);
666                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
667                                            size, p);
668                 p = hclge_tqps_get_strings(handle, p);
669         } else if (stringset == ETH_SS_TEST) {
670                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
671                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
672                                ETH_GSTRING_LEN);
673                         p += ETH_GSTRING_LEN;
674                 }
675                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
676                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
677                                ETH_GSTRING_LEN);
678                         p += ETH_GSTRING_LEN;
679                 }
680                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
681                         memcpy(p,
682                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
683                                ETH_GSTRING_LEN);
684                         p += ETH_GSTRING_LEN;
685                 }
686                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
687                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
688                                ETH_GSTRING_LEN);
689                         p += ETH_GSTRING_LEN;
690                 }
691         }
692 }
693
694 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
695 {
696         struct hclge_vport *vport = hclge_get_vport(handle);
697         struct hclge_dev *hdev = vport->back;
698         u64 *p;
699
700         p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, g_mac_stats_string,
701                                  ARRAY_SIZE(g_mac_stats_string), data);
702         p = hclge_tqps_get_stats(handle, p);
703 }
704
705 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
706                                      u64 *rx_cnt)
707 {
708         struct hclge_vport *vport = hclge_get_vport(handle);
709         struct hclge_dev *hdev = vport->back;
710
711         *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
712         *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
713 }
714
715 static int hclge_parse_func_status(struct hclge_dev *hdev,
716                                    struct hclge_func_status_cmd *status)
717 {
718         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
719                 return -EINVAL;
720
721         /* Set the pf to main pf */
722         if (status->pf_state & HCLGE_PF_STATE_MAIN)
723                 hdev->flag |= HCLGE_FLAG_MAIN;
724         else
725                 hdev->flag &= ~HCLGE_FLAG_MAIN;
726
727         return 0;
728 }
729
730 static int hclge_query_function_status(struct hclge_dev *hdev)
731 {
732 #define HCLGE_QUERY_MAX_CNT     5
733
734         struct hclge_func_status_cmd *req;
735         struct hclge_desc desc;
736         int timeout = 0;
737         int ret;
738
739         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
740         req = (struct hclge_func_status_cmd *)desc.data;
741
742         do {
743                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
744                 if (ret) {
745                         dev_err(&hdev->pdev->dev,
746                                 "query function status failed %d.\n", ret);
747                         return ret;
748                 }
749
750                 /* Check pf reset is done */
751                 if (req->pf_state)
752                         break;
753                 usleep_range(1000, 2000);
754         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
755
756         ret = hclge_parse_func_status(hdev, req);
757
758         return ret;
759 }
760
761 static int hclge_query_pf_resource(struct hclge_dev *hdev)
762 {
763         struct hclge_pf_res_cmd *req;
764         struct hclge_desc desc;
765         int ret;
766
767         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
768         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
769         if (ret) {
770                 dev_err(&hdev->pdev->dev,
771                         "query pf resource failed %d.\n", ret);
772                 return ret;
773         }
774
775         req = (struct hclge_pf_res_cmd *)desc.data;
776         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
777         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
778
779         if (req->tx_buf_size)
780                 hdev->tx_buf_size =
781                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
782         else
783                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
784
785         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
786
787         if (req->dv_buf_size)
788                 hdev->dv_buf_size =
789                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
790         else
791                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
792
793         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
794
795         if (hnae3_dev_roce_supported(hdev)) {
796                 hdev->roce_base_msix_offset =
797                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
798                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
799                 hdev->num_roce_msi =
800                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
801                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
802
803                 /* PF should have NIC vectors and RoCE vectors,
804                  * NIC vectors are queued before RoCE vectors.
805                  */
806                 hdev->num_msi = hdev->num_roce_msi +
807                                 hdev->roce_base_msix_offset;
808         } else {
809                 hdev->num_msi =
810                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
811                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
812         }
813
814         return 0;
815 }
816
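/* Translate a firmware speed code (such as cfg.default_speed read from the
 * device configuration) into the driver's HCLGE_MAC_SPEED_* value.
 */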
817 static int hclge_parse_speed(int speed_cmd, int *speed)
818 {
819         switch (speed_cmd) {
820         case 6:
821                 *speed = HCLGE_MAC_SPEED_10M;
822                 break;
823         case 7:
824                 *speed = HCLGE_MAC_SPEED_100M;
825                 break;
826         case 0:
827                 *speed = HCLGE_MAC_SPEED_1G;
828                 break;
829         case 1:
830                 *speed = HCLGE_MAC_SPEED_10G;
831                 break;
832         case 2:
833                 *speed = HCLGE_MAC_SPEED_25G;
834                 break;
835         case 3:
836                 *speed = HCLGE_MAC_SPEED_40G;
837                 break;
838         case 4:
839                 *speed = HCLGE_MAC_SPEED_50G;
840                 break;
841         case 5:
842                 *speed = HCLGE_MAC_SPEED_100G;
843                 break;
844         default:
845                 return -EINVAL;
846         }
847
848         return 0;
849 }
850
851 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
852 {
853         struct hclge_vport *vport = hclge_get_vport(handle);
854         struct hclge_dev *hdev = vport->back;
855         u32 speed_ability = hdev->hw.mac.speed_ability;
856         u32 speed_bit = 0;
857
858         switch (speed) {
859         case HCLGE_MAC_SPEED_10M:
860                 speed_bit = HCLGE_SUPPORT_10M_BIT;
861                 break;
862         case HCLGE_MAC_SPEED_100M:
863                 speed_bit = HCLGE_SUPPORT_100M_BIT;
864                 break;
865         case HCLGE_MAC_SPEED_1G:
866                 speed_bit = HCLGE_SUPPORT_1G_BIT;
867                 break;
868         case HCLGE_MAC_SPEED_10G:
869                 speed_bit = HCLGE_SUPPORT_10G_BIT;
870                 break;
871         case HCLGE_MAC_SPEED_25G:
872                 speed_bit = HCLGE_SUPPORT_25G_BIT;
873                 break;
874         case HCLGE_MAC_SPEED_40G:
875                 speed_bit = HCLGE_SUPPORT_40G_BIT;
876                 break;
877         case HCLGE_MAC_SPEED_50G:
878                 speed_bit = HCLGE_SUPPORT_50G_BIT;
879                 break;
880         case HCLGE_MAC_SPEED_100G:
881                 speed_bit = HCLGE_SUPPORT_100G_BIT;
882                 break;
883         default:
884                 return -EINVAL;
885         }
886
887         if (speed_bit & speed_ability)
888                 return 0;
889
890         return -EINVAL;
891 }
892
893 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
894 {
895         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
896                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
897                                  mac->supported);
898         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
899                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
900                                  mac->supported);
901         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
902                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
903                                  mac->supported);
904         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
905                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
906                                  mac->supported);
907         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
908                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
909                                  mac->supported);
910 }
911
912 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
913 {
914         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
915                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
916                                  mac->supported);
917         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
918                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
919                                  mac->supported);
920         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
921                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
922                                  mac->supported);
923         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
924                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
925                                  mac->supported);
926         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
927                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
928                                  mac->supported);
929 }
930
931 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
932 {
933         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
934                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
935                                  mac->supported);
936         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
937                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
938                                  mac->supported);
939         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
940                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
941                                  mac->supported);
942         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
943                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
944                                  mac->supported);
945         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
946                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
947                                  mac->supported);
948 }
949
950 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
951 {
952         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
953                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
954                                  mac->supported);
955         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
956                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
957                                  mac->supported);
958         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
959                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
960                                  mac->supported);
961         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
962                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
963                                  mac->supported);
964         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
965                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
966                                  mac->supported);
967         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
968                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
969                                  mac->supported);
970 }
971
972 static void hclge_convert_setting_fec(struct hclge_mac *mac)
973 {
974         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
975         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
976
977         switch (mac->speed) {
978         case HCLGE_MAC_SPEED_10G:
979         case HCLGE_MAC_SPEED_40G:
980                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
981                                  mac->supported);
982                 mac->fec_ability =
983                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
984                 break;
985         case HCLGE_MAC_SPEED_25G:
986         case HCLGE_MAC_SPEED_50G:
987                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
988                                  mac->supported);
989                 mac->fec_ability =
990                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
991                         BIT(HNAE3_FEC_AUTO);
992                 break;
993         case HCLGE_MAC_SPEED_100G:
994                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
995                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
996                 break;
997         default:
998                 mac->fec_ability = 0;
999                 break;
1000         }
1001 }
1002
1003 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1004                                         u8 speed_ability)
1005 {
1006         struct hclge_mac *mac = &hdev->hw.mac;
1007
1008         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1009                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1010                                  mac->supported);
1011
1012         hclge_convert_setting_sr(mac, speed_ability);
1013         hclge_convert_setting_lr(mac, speed_ability);
1014         hclge_convert_setting_cr(mac, speed_ability);
1015         if (hdev->pdev->revision >= 0x21)
1016                 hclge_convert_setting_fec(mac);
1017
1018         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1020         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1021 }
1022
1023 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1024                                             u8 speed_ability)
1025 {
1026         struct hclge_mac *mac = &hdev->hw.mac;
1027
1028         hclge_convert_setting_kr(mac, speed_ability);
1029         if (hdev->pdev->revision >= 0x21)
1030                 hclge_convert_setting_fec(mac);
1031         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1032         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1033         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1034 }
1035
1036 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1037                                          u8 speed_ability)
1038 {
1039         unsigned long *supported = hdev->hw.mac.supported;
1040
1041         /* default to supporting all speeds for a GE port */
1042         if (!speed_ability)
1043                 speed_ability = HCLGE_SUPPORT_GE;
1044
1045         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1047                                  supported);
1048
1049         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1050                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1051                                  supported);
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1053                                  supported);
1054         }
1055
1056         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1057                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1059         }
1060
1061         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1062         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1063         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1064         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1065 }
1066
1067 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1068 {
1069         u8 media_type = hdev->hw.mac.media_type;
1070
1071         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1072                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1073         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1074                 hclge_parse_copper_link_mode(hdev, speed_ability);
1075         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1076                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1077 }
1078 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1079 {
1080         struct hclge_cfg_param_cmd *req;
1081         u64 mac_addr_tmp_high;
1082         u64 mac_addr_tmp;
1083         unsigned int i;
1084
1085         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1086
1087         /* get the configuration */
1088         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1089                                               HCLGE_CFG_VMDQ_M,
1090                                               HCLGE_CFG_VMDQ_S);
1091         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1092                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1093         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1094                                             HCLGE_CFG_TQP_DESC_N_M,
1095                                             HCLGE_CFG_TQP_DESC_N_S);
1096
1097         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1098                                         HCLGE_CFG_PHY_ADDR_M,
1099                                         HCLGE_CFG_PHY_ADDR_S);
1100         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1101                                           HCLGE_CFG_MEDIA_TP_M,
1102                                           HCLGE_CFG_MEDIA_TP_S);
1103         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1104                                           HCLGE_CFG_RX_BUF_LEN_M,
1105                                           HCLGE_CFG_RX_BUF_LEN_S);
1106         /* get mac_address */
1107         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1108         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1109                                             HCLGE_CFG_MAC_ADDR_H_M,
1110                                             HCLGE_CFG_MAC_ADDR_H_S);
1111
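        /* assemble the 48-bit MAC address: the low 32 bits come from param[2],
         * the high bits from a field of param[3]; the two shifts together move
         * the high part up by 32 bits.
         */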
1112         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1113
1114         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1115                                              HCLGE_CFG_DEFAULT_SPEED_M,
1116                                              HCLGE_CFG_DEFAULT_SPEED_S);
1117         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1118                                             HCLGE_CFG_RSS_SIZE_M,
1119                                             HCLGE_CFG_RSS_SIZE_S);
1120
1121         for (i = 0; i < ETH_ALEN; i++)
1122                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1123
1124         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1125         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1126
1127         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1128                                              HCLGE_CFG_SPEED_ABILITY_M,
1129                                              HCLGE_CFG_SPEED_ABILITY_S);
1130         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1131                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1132                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1133         if (!cfg->umv_space)
1134                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1135 }
1136
1137 /* hclge_get_cfg: query the static parameters from flash
1138  * @hdev: pointer to struct hclge_dev
1139  * @hcfg: the config structure to be filled with the queried parameters
1140  */
1141 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1142 {
1143         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1144         struct hclge_cfg_param_cmd *req;
1145         unsigned int i;
1146         int ret;
1147
1148         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1149                 u32 offset = 0;
1150
1151                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1152                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1153                                            true);
1154                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1155                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1156                 /* Length must be in units of 4 bytes when sent to hardware */
1157                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1158                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1159                 req->offset = cpu_to_le32(offset);
1160         }
1161
1162         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1163         if (ret) {
1164                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1165                 return ret;
1166         }
1167
1168         hclge_parse_cfg(hcfg, desc);
1169
1170         return 0;
1171 }
1172
1173 static int hclge_get_cap(struct hclge_dev *hdev)
1174 {
1175         int ret;
1176
1177         ret = hclge_query_function_status(hdev);
1178         if (ret) {
1179                 dev_err(&hdev->pdev->dev,
1180                         "query function status error %d.\n", ret);
1181                 return ret;
1182         }
1183
1184         /* get pf resource */
1185         ret = hclge_query_pf_resource(hdev);
1186         if (ret)
1187                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1188
1189         return ret;
1190 }
1191
1192 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1193 {
1194 #define HCLGE_MIN_TX_DESC       64
1195 #define HCLGE_MIN_RX_DESC       64
1196
1197         if (!is_kdump_kernel())
1198                 return;
1199
1200         dev_info(&hdev->pdev->dev,
1201                  "Running kdump kernel. Using minimal resources\n");
1202
1203         /* minimal number of queue pairs equals the number of vports */
1204         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1205         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1206         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1207 }
1208
1209 static int hclge_configure(struct hclge_dev *hdev)
1210 {
1211         struct hclge_cfg cfg;
1212         unsigned int i;
1213         int ret;
1214
1215         ret = hclge_get_cfg(hdev, &cfg);
1216         if (ret) {
1217                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1218                 return ret;
1219         }
1220
1221         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1222         hdev->base_tqp_pid = 0;
1223         hdev->rss_size_max = cfg.rss_size_max;
1224         hdev->rx_buf_len = cfg.rx_buf_len;
1225         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1226         hdev->hw.mac.media_type = cfg.media_type;
1227         hdev->hw.mac.phy_addr = cfg.phy_addr;
1228         hdev->num_tx_desc = cfg.tqp_desc_num;
1229         hdev->num_rx_desc = cfg.tqp_desc_num;
1230         hdev->tm_info.num_pg = 1;
1231         hdev->tc_max = cfg.tc_num;
1232         hdev->tm_info.hw_pfc_map = 0;
1233         hdev->wanted_umv_size = cfg.umv_space;
1234
1235         if (hnae3_dev_fd_supported(hdev)) {
1236                 hdev->fd_en = true;
1237                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1238         }
1239
1240         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1241         if (ret) {
1242                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1243                 return ret;
1244         }
1245
1246         hclge_parse_link_mode(hdev, cfg.speed_ability);
1247
1248         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1249             (hdev->tc_max < 1)) {
1250                 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1251                          hdev->tc_max);
1252                 hdev->tc_max = 1;
1253         }
1254
1255         /* Dev does not support DCB */
1256         if (!hnae3_dev_dcb_supported(hdev)) {
1257                 hdev->tc_max = 1;
1258                 hdev->pfc_max = 0;
1259         } else {
1260                 hdev->pfc_max = hdev->tc_max;
1261         }
1262
1263         hdev->tm_info.num_tc = 1;
1264
1265         /* Non-contiguous TCs are currently not supported */
1266         for (i = 0; i < hdev->tm_info.num_tc; i++)
1267                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1268
1269         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1270
1271         hclge_init_kdump_kernel_config(hdev);
1272
1273         /* Set the init affinity based on pci func number */
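        /* i.e. pick one CPU local to the device's NUMA node, indexed by the PCI
         * function number modulo the node's CPU count, so that different PFs
         * spread their misc interrupt affinity across CPUs.
         */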
1274         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1275         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1276         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1277                         &hdev->affinity_mask);
1278
1279         return ret;
1280 }
1281
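/* Tell the hardware the minimum and maximum TSO MSS to accept, via the
 * HCLGE_OPC_TSO_GENERIC_CONFIG command.
 */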
1282 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1283                             unsigned int tso_mss_max)
1284 {
1285         struct hclge_cfg_tso_status_cmd *req;
1286         struct hclge_desc desc;
1287         u16 tso_mss;
1288
1289         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1290
1291         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1292
1293         tso_mss = 0;
1294         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1295                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1296         req->tso_mss_min = cpu_to_le16(tso_mss);
1297
1298         tso_mss = 0;
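             /* the MSS_MIN mask/shift are reused below; the max MSS value is
              * assumed to share the same bit layout within its own 16-bit field
              */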
1299         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1300                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1301         req->tso_mss_max = cpu_to_le16(tso_mss);
1302
1303         return hclge_cmd_send(&hdev->hw, &desc, 1);
1304 }
1305
1306 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1307 {
1308         struct hclge_cfg_gro_status_cmd *req;
1309         struct hclge_desc desc;
1310         int ret;
1311
1312         if (!hnae3_dev_gro_supported(hdev))
1313                 return 0;
1314
1315         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1316         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1317
1318         req->gro_en = cpu_to_le16(en ? 1 : 0);
1319
1320         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1321         if (ret)
1322                 dev_err(&hdev->pdev->dev,
1323                         "GRO hardware config cmd failed, ret = %d\n", ret);
1324
1325         return ret;
1326 }
1327
1328 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1329 {
1330         struct hclge_tqp *tqp;
1331         int i;
1332
1333         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1334                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1335         if (!hdev->htqp)
1336                 return -ENOMEM;
1337
1338         tqp = hdev->htqp;
1339
1340         for (i = 0; i < hdev->num_tqps; i++) {
1341                 tqp->dev = &hdev->pdev->dev;
1342                 tqp->index = i;
1343
1344                 tqp->q.ae_algo = &ae_algo;
1345                 tqp->q.buf_size = hdev->rx_buf_len;
1346                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1347                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1348                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1349                         i * HCLGE_TQP_REG_SIZE;
1350
1351                 tqp++;
1352         }
1353
1354         return 0;
1355 }
1356
1357 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1358                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1359 {
1360         struct hclge_tqp_map_cmd *req;
1361         struct hclge_desc desc;
1362         int ret;
1363
1364         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1365
1366         req = (struct hclge_tqp_map_cmd *)desc.data;
1367         req->tqp_id = cpu_to_le16(tqp_pid);
1368         req->tqp_vf = func_id;
1369         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1370         if (!is_pf)
1371                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1372         req->tqp_vid = cpu_to_le16(tqp_vid);
1373
1374         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1375         if (ret)
1376                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1377
1378         return ret;
1379 }
1380
1381 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1382 {
1383         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1384         struct hclge_dev *hdev = vport->back;
1385         int i, alloced;
1386
1387         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1388              alloced < num_tqps; i++) {
1389                 if (!hdev->htqp[i].alloced) {
1390                         hdev->htqp[i].q.handle = &vport->nic;
1391                         hdev->htqp[i].q.tqp_index = alloced;
1392                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1393                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1394                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1395                         hdev->htqp[i].alloced = true;
1396                         alloced++;
1397                 }
1398         }
1399         vport->alloc_tqps = alloced;
1400         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1401                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1402
1403         return 0;
1404 }
1405
1406 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1407                             u16 num_tx_desc, u16 num_rx_desc)
1408
1409 {
1410         struct hnae3_handle *nic = &vport->nic;
1411         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1412         struct hclge_dev *hdev = vport->back;
1413         int ret;
1414
1415         kinfo->num_tx_desc = num_tx_desc;
1416         kinfo->num_rx_desc = num_rx_desc;
1417
1418         kinfo->rx_buf_len = hdev->rx_buf_len;
1419
1420         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1421                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1422         if (!kinfo->tqp)
1423                 return -ENOMEM;
1424
1425         ret = hclge_assign_tqp(vport, num_tqps);
1426         if (ret)
1427                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1428
1429         return ret;
1430 }
1431
1432 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1433                                   struct hclge_vport *vport)
1434 {
1435         struct hnae3_handle *nic = &vport->nic;
1436         struct hnae3_knic_private_info *kinfo;
1437         u16 i;
1438
1439         kinfo = &nic->kinfo;
1440         for (i = 0; i < vport->alloc_tqps; i++) {
1441                 struct hclge_tqp *q =
1442                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1443                 bool is_pf;
1444                 int ret;
1445
1446                 is_pf = !(vport->vport_id);
1447                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1448                                              i, is_pf);
1449                 if (ret)
1450                         return ret;
1451         }
1452
1453         return 0;
1454 }
1455
1456 static int hclge_map_tqp(struct hclge_dev *hdev)
1457 {
1458         struct hclge_vport *vport = hdev->vport;
1459         u16 i, num_vport;
1460
1461         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1462         for (i = 0; i < num_vport; i++) {
1463                 int ret;
1464
1465                 ret = hclge_map_tqp_to_vport(hdev, vport);
1466                 if (ret)
1467                         return ret;
1468
1469                 vport++;
1470         }
1471
1472         return 0;
1473 }
1474
1475 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1476 {
1477         struct hnae3_handle *nic = &vport->nic;
1478         struct hclge_dev *hdev = vport->back;
1479         int ret;
1480
1481         nic->pdev = hdev->pdev;
1482         nic->ae_algo = &ae_algo;
1483         nic->numa_node_mask = hdev->numa_node_mask;
1484
1485         ret = hclge_knic_setup(vport, num_tqps,
1486                                hdev->num_tx_desc, hdev->num_rx_desc);
1487         if (ret)
1488                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1489
1490         return ret;
1491 }
1492
1493 static int hclge_alloc_vport(struct hclge_dev *hdev)
1494 {
1495         struct pci_dev *pdev = hdev->pdev;
1496         struct hclge_vport *vport;
1497         u32 tqp_main_vport;
1498         u32 tqp_per_vport;
1499         int num_vport, i;
1500         int ret;
1501
1502         /* We need to alloc a vport for main NIC of PF */
1503         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1504
1505         if (hdev->num_tqps < num_vport) {
1506                 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1507                         hdev->num_tqps, num_vport);
1508                 return -EINVAL;
1509         }
1510
1511         /* Alloc the same number of TQPs for every vport; main vport gets the remainder */
1512         tqp_per_vport = hdev->num_tqps / num_vport;
1513         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1514
1515         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1516                              GFP_KERNEL);
1517         if (!vport)
1518                 return -ENOMEM;
1519
1520         hdev->vport = vport;
1521         hdev->num_alloc_vport = num_vport;
1522
1523         if (IS_ENABLED(CONFIG_PCI_IOV))
1524                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1525
1526         for (i = 0; i < num_vport; i++) {
1527                 vport->back = hdev;
1528                 vport->vport_id = i;
1529                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1530                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1531                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1532                 INIT_LIST_HEAD(&vport->vlan_list);
1533                 INIT_LIST_HEAD(&vport->uc_mac_list);
1534                 INIT_LIST_HEAD(&vport->mc_mac_list);
1535
1536                 if (i == 0)
1537                         ret = hclge_vport_setup(vport, tqp_main_vport);
1538                 else
1539                         ret = hclge_vport_setup(vport, tqp_per_vport);
1540                 if (ret) {
1541                         dev_err(&pdev->dev,
1542                                 "vport setup failed for vport %d, %d\n",
1543                                 i, ret);
1544                         return ret;
1545                 }
1546
1547                 vport++;
1548         }
1549
1550         return 0;
1551 }
1552
1553 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1554                                     struct hclge_pkt_buf_alloc *buf_alloc)
1555 {
1556 /* TX buffer size is in units of 128 bytes */
1557 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1558 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1559         struct hclge_tx_buff_alloc_cmd *req;
1560         struct hclge_desc desc;
1561         int ret;
1562         u8 i;
1563
1564         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1565
1566         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1567         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1568                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1569
1570                 req->tx_pkt_buff[i] =
1571                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1572                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1573         }
1574
1575         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1576         if (ret)
1577                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1578                         ret);
1579
1580         return ret;
1581 }
1582
1583 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1584                                  struct hclge_pkt_buf_alloc *buf_alloc)
1585 {
1586         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1587
1588         if (ret)
1589                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1590
1591         return ret;
1592 }
1593
1594 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1595 {
1596         unsigned int i;
1597         u32 cnt = 0;
1598
1599         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1600                 if (hdev->hw_tc_map & BIT(i))
1601                         cnt++;
1602         return cnt;
1603 }
1604
1605 /* Get the number of pfc enabled TCs, which have private buffer */
1606 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1607                                   struct hclge_pkt_buf_alloc *buf_alloc)
1608 {
1609         struct hclge_priv_buf *priv;
1610         unsigned int i;
1611         int cnt = 0;
1612
1613         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1614                 priv = &buf_alloc->priv_buf[i];
1615                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1616                     priv->enable)
1617                         cnt++;
1618         }
1619
1620         return cnt;
1621 }
1622
1623 /* Get the number of pfc disabled TCs, which have private buffer */
1624 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1625                                      struct hclge_pkt_buf_alloc *buf_alloc)
1626 {
1627         struct hclge_priv_buf *priv;
1628         unsigned int i;
1629         int cnt = 0;
1630
1631         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1632                 priv = &buf_alloc->priv_buf[i];
1633                 if (hdev->hw_tc_map & BIT(i) &&
1634                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1635                     priv->enable)
1636                         cnt++;
1637         }
1638
1639         return cnt;
1640 }
1641
1642 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1643 {
1644         struct hclge_priv_buf *priv;
1645         u32 rx_priv = 0;
1646         int i;
1647
1648         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1649                 priv = &buf_alloc->priv_buf[i];
1650                 if (priv->enable)
1651                         rx_priv += priv->buf_size;
1652         }
1653         return rx_priv;
1654 }
1655
1656 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1657 {
1658         u32 i, total_tx_size = 0;
1659
1660         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1661                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1662
1663         return total_tx_size;
1664 }
1665
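     /* Check whether rx_all (the total rx buffer left after tx allocation) can
      * cover the private buffers of all enabled TCs plus the shared standard
      * buffer; if so, fill in the shared buffer size and per-TC thresholds.
      */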
1666 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1667                                 struct hclge_pkt_buf_alloc *buf_alloc,
1668                                 u32 rx_all)
1669 {
1670         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1671         u32 tc_num = hclge_get_tc_num(hdev);
1672         u32 shared_buf, aligned_mps;
1673         u32 rx_priv;
1674         int i;
1675
1676         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1677
1678         if (hnae3_dev_dcb_supported(hdev))
1679                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1680                                         hdev->dv_buf_size;
1681         else
1682                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1683                                         + hdev->dv_buf_size;
1684
1685         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1686         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1687                              HCLGE_BUF_SIZE_UNIT);
1688
1689         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1690         if (rx_all < rx_priv + shared_std)
1691                 return false;
1692
1693         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1694         buf_alloc->s_buf.buf_size = shared_buf;
1695         if (hnae3_dev_dcb_supported(hdev)) {
1696                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1697                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1698                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1699                                   HCLGE_BUF_SIZE_UNIT);
1700         } else {
1701                 buf_alloc->s_buf.self.high = aligned_mps +
1702                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1703                 buf_alloc->s_buf.self.low = aligned_mps;
1704         }
1705
1706         if (hnae3_dev_dcb_supported(hdev)) {
1707                 hi_thrd = shared_buf - hdev->dv_buf_size;
1708
1709                 if (tc_num <= NEED_RESERVE_TC_NUM)
1710                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1711                                         / BUF_MAX_PERCENT;
1712
1713                 if (tc_num)
1714                         hi_thrd = hi_thrd / tc_num;
1715
1716                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1717                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1718                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1719         } else {
1720                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1721                 lo_thrd = aligned_mps;
1722         }
1723
1724         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1725                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1726                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1727         }
1728
1729         return true;
1730 }
1731
1732 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1733                                 struct hclge_pkt_buf_alloc *buf_alloc)
1734 {
1735         u32 i, total_size;
1736
1737         total_size = hdev->pkt_buf_size;
1738
1739         /* alloc tx buffer for all enabled tc */
1740         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1741                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1742
1743                 if (hdev->hw_tc_map & BIT(i)) {
1744                         if (total_size < hdev->tx_buf_size)
1745                                 return -ENOMEM;
1746
1747                         priv->tx_buf_size = hdev->tx_buf_size;
1748                 } else {
1749                         priv->tx_buf_size = 0;
1750                 }
1751
1752                 total_size -= priv->tx_buf_size;
1753         }
1754
1755         return 0;
1756 }
1757
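     /* Calculate rx private buffers for every enabled TC. @max selects the
      * larger waterline scheme; the caller retries with @max false when the
      * total packet buffer is not big enough.
      */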
1758 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1759                                   struct hclge_pkt_buf_alloc *buf_alloc)
1760 {
1761         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1762         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1763         unsigned int i;
1764
1765         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1766                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1767
1768                 priv->enable = 0;
1769                 priv->wl.low = 0;
1770                 priv->wl.high = 0;
1771                 priv->buf_size = 0;
1772
1773                 if (!(hdev->hw_tc_map & BIT(i)))
1774                         continue;
1775
1776                 priv->enable = 1;
1777
1778                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1779                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1780                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1781                                                 HCLGE_BUF_SIZE_UNIT);
1782                 } else {
1783                         priv->wl.low = 0;
1784                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1785                                         aligned_mps;
1786                 }
1787
1788                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1789         }
1790
1791         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1792 }
1793
1794 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1795                                           struct hclge_pkt_buf_alloc *buf_alloc)
1796 {
1797         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1798         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1799         int i;
1800
1801         /* clear the TCs starting from the last one */
1802         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1803                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1804                 unsigned int mask = BIT((unsigned int)i);
1805
1806                 if (hdev->hw_tc_map & mask &&
1807                     !(hdev->tm_info.hw_pfc_map & mask)) {
1808                         /* Clear the no pfc TC private buffer */
1809                         priv->wl.low = 0;
1810                         priv->wl.high = 0;
1811                         priv->buf_size = 0;
1812                         priv->enable = 0;
1813                         no_pfc_priv_num--;
1814                 }
1815
1816                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1817                     no_pfc_priv_num == 0)
1818                         break;
1819         }
1820
1821         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1822 }
1823
1824 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1825                                         struct hclge_pkt_buf_alloc *buf_alloc)
1826 {
1827         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1828         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1829         int i;
1830
1831         /* clear the TCs starting from the last one */
1832         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1833                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1834                 unsigned int mask = BIT((unsigned int)i);
1835
1836                 if (hdev->hw_tc_map & mask &&
1837                     hdev->tm_info.hw_pfc_map & mask) {
1838                         /* Reduce the number of pfc TC with private buffer */
1839                         priv->wl.low = 0;
1840                         priv->enable = 0;
1841                         priv->wl.high = 0;
1842                         priv->buf_size = 0;
1843                         pfc_priv_num--;
1844                 }
1845
1846                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1847                     pfc_priv_num == 0)
1848                         break;
1849         }
1850
1851         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1852 }
1853
1854 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
1855                                       struct hclge_pkt_buf_alloc *buf_alloc)
1856 {
1857 #define COMPENSATE_BUFFER       0x3C00
1858 #define COMPENSATE_HALF_MPS_NUM 5
1859 #define PRIV_WL_GAP             0x1800
1860
1861         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1862         u32 tc_num = hclge_get_tc_num(hdev);
1863         u32 half_mps = hdev->mps >> 1;
1864         u32 min_rx_priv;
1865         unsigned int i;
1866
1867         if (tc_num)
1868                 rx_priv = rx_priv / tc_num;
1869
1870         if (tc_num <= NEED_RESERVE_TC_NUM)
1871                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
1872
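             /* smallest usable private buffer per TC: the dv buffer plus a
              * fixed compensation and COMPENSATE_HALF_MPS_NUM half-MPS units
              */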
1873         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
1874                         COMPENSATE_HALF_MPS_NUM * half_mps;
1875         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
1876         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
1877
1878         if (rx_priv < min_rx_priv)
1879                 return false;
1880
1881         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1882                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1883
1884                 priv->enable = 0;
1885                 priv->wl.low = 0;
1886                 priv->wl.high = 0;
1887                 priv->buf_size = 0;
1888
1889                 if (!(hdev->hw_tc_map & BIT(i)))
1890                         continue;
1891
1892                 priv->enable = 1;
1893                 priv->buf_size = rx_priv;
1894                 priv->wl.high = rx_priv - hdev->dv_buf_size;
1895                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
1896         }
1897
1898         buf_alloc->s_buf.buf_size = 0;
1899
1900         return true;
1901 }
1902
1903 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1904  * @hdev: pointer to struct hclge_dev
1905  * @buf_alloc: pointer to buffer calculation data
1906  * @return: 0: calculation successful, negative: fail
1907  */
1908 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1909                                 struct hclge_pkt_buf_alloc *buf_alloc)
1910 {
1911         /* When DCB is not supported, rx private buffer is not allocated. */
1912         if (!hnae3_dev_dcb_supported(hdev)) {
1913                 u32 rx_all = hdev->pkt_buf_size;
1914
1915                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1916                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1917                         return -ENOMEM;
1918
1919                 return 0;
1920         }
1921
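             /* With DCB, try the strategies below from the most to the least
              * generous: private buffers only, shared buffer with max
              * waterlines, reduced waterlines, then dropping private buffers
              * for non-PFC TCs and finally for PFC TCs as well.
              */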
1922         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
1923                 return 0;
1924
1925         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1926                 return 0;
1927
1928         /* try to decrease the buffer size */
1929         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1930                 return 0;
1931
1932         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1933                 return 0;
1934
1935         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1936                 return 0;
1937
1938         return -ENOMEM;
1939 }
1940
1941 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1942                                    struct hclge_pkt_buf_alloc *buf_alloc)
1943 {
1944         struct hclge_rx_priv_buff_cmd *req;
1945         struct hclge_desc desc;
1946         int ret;
1947         int i;
1948
1949         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1950         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1951
1952         /* Alloc private buffer for each TC */
1953         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1954                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1955
1956                 req->buf_num[i] =
1957                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1958                 req->buf_num[i] |=
1959                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1960         }
1961
1962         req->shared_buf =
1963                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1964                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
1965
1966         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1967         if (ret)
1968                 dev_err(&hdev->pdev->dev,
1969                         "rx private buffer alloc cmd failed %d\n", ret);
1970
1971         return ret;
1972 }
1973
1974 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1975                                    struct hclge_pkt_buf_alloc *buf_alloc)
1976 {
1977         struct hclge_rx_priv_wl_buf *req;
1978         struct hclge_priv_buf *priv;
1979         struct hclge_desc desc[2];
1980         int i, j;
1981         int ret;
1982
1983         for (i = 0; i < 2; i++) {
1984                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1985                                            false);
1986                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1987
1988                 /* The first descriptor sets the NEXT bit to 1 */
1989                 if (i == 0)
1990                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1991                 else
1992                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1993
1994                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1995                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1996
1997                         priv = &buf_alloc->priv_buf[idx];
1998                         req->tc_wl[j].high =
1999                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2000                         req->tc_wl[j].high |=
2001                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2002                         req->tc_wl[j].low =
2003                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2004                         req->tc_wl[j].low |=
2005                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2006                 }
2007         }
2008
2009         /* Send 2 descriptors at one time */
2010         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2011         if (ret)
2012                 dev_err(&hdev->pdev->dev,
2013                         "rx private waterline config cmd failed %d\n",
2014                         ret);
2015         return ret;
2016 }
2017
2018 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2019                                     struct hclge_pkt_buf_alloc *buf_alloc)
2020 {
2021         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2022         struct hclge_rx_com_thrd *req;
2023         struct hclge_desc desc[2];
2024         struct hclge_tc_thrd *tc;
2025         int i, j;
2026         int ret;
2027
2028         for (i = 0; i < 2; i++) {
2029                 hclge_cmd_setup_basic_desc(&desc[i],
2030                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2031                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2032
2033                 /* The first descriptor sets the NEXT bit to 1 */
2034                 if (i == 0)
2035                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2036                 else
2037                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2038
2039                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2040                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2041
2042                         req->com_thrd[j].high =
2043                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2044                         req->com_thrd[j].high |=
2045                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2046                         req->com_thrd[j].low =
2047                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2048                         req->com_thrd[j].low |=
2049                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2050                 }
2051         }
2052
2053         /* Send 2 descriptors at one time */
2054         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2055         if (ret)
2056                 dev_err(&hdev->pdev->dev,
2057                         "common threshold config cmd failed %d\n", ret);
2058         return ret;
2059 }
2060
2061 static int hclge_common_wl_config(struct hclge_dev *hdev,
2062                                   struct hclge_pkt_buf_alloc *buf_alloc)
2063 {
2064         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2065         struct hclge_rx_com_wl *req;
2066         struct hclge_desc desc;
2067         int ret;
2068
2069         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2070
2071         req = (struct hclge_rx_com_wl *)desc.data;
2072         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2073         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2074
2075         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2076         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2077
2078         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2079         if (ret)
2080                 dev_err(&hdev->pdev->dev,
2081                         "common waterline config cmd failed %d\n", ret);
2082
2083         return ret;
2084 }
2085
2086 int hclge_buffer_alloc(struct hclge_dev *hdev)
2087 {
2088         struct hclge_pkt_buf_alloc *pkt_buf;
2089         int ret;
2090
2091         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2092         if (!pkt_buf)
2093                 return -ENOMEM;
2094
2095         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2096         if (ret) {
2097                 dev_err(&hdev->pdev->dev,
2098                         "could not calc tx buffer size for all TCs %d\n", ret);
2099                 goto out;
2100         }
2101
2102         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2103         if (ret) {
2104                 dev_err(&hdev->pdev->dev,
2105                         "could not alloc tx buffers %d\n", ret);
2106                 goto out;
2107         }
2108
2109         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2110         if (ret) {
2111                 dev_err(&hdev->pdev->dev,
2112                         "could not calc rx priv buffer size for all TCs %d\n",
2113                         ret);
2114                 goto out;
2115         }
2116
2117         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2118         if (ret) {
2119                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2120                         ret);
2121                 goto out;
2122         }
2123
2124         if (hnae3_dev_dcb_supported(hdev)) {
2125                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2126                 if (ret) {
2127                         dev_err(&hdev->pdev->dev,
2128                                 "could not configure rx private waterline %d\n",
2129                                 ret);
2130                         goto out;
2131                 }
2132
2133                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2134                 if (ret) {
2135                         dev_err(&hdev->pdev->dev,
2136                                 "could not configure common threshold %d\n",
2137                                 ret);
2138                         goto out;
2139                 }
2140         }
2141
2142         ret = hclge_common_wl_config(hdev, pkt_buf);
2143         if (ret)
2144                 dev_err(&hdev->pdev->dev,
2145                         "could not configure common waterline %d\n", ret);
2146
2147 out:
2148         kfree(pkt_buf);
2149         return ret;
2150 }
2151
2152 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2153 {
2154         struct hnae3_handle *roce = &vport->roce;
2155         struct hnae3_handle *nic = &vport->nic;
2156
2157         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2158
2159         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2160             vport->back->num_msi_left == 0)
2161                 return -EINVAL;
2162
2163         roce->rinfo.base_vector = vport->back->roce_base_vector;
2164
2165         roce->rinfo.netdev = nic->kinfo.netdev;
2166         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2167
2168         roce->pdev = nic->pdev;
2169         roce->ae_algo = nic->ae_algo;
2170         roce->numa_node_mask = nic->numa_node_mask;
2171
2172         return 0;
2173 }
2174
2175 static int hclge_init_msi(struct hclge_dev *hdev)
2176 {
2177         struct pci_dev *pdev = hdev->pdev;
2178         int vectors;
2179         int i;
2180
2181         vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2182                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2183         if (vectors < 0) {
2184                 dev_err(&pdev->dev,
2185                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2186                         vectors);
2187                 return vectors;
2188         }
2189         if (vectors < hdev->num_msi)
2190                 dev_warn(&hdev->pdev->dev,
2191                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2192                          hdev->num_msi, vectors);
2193
2194         hdev->num_msi = vectors;
2195         hdev->num_msi_left = vectors;
2196         hdev->base_msi_vector = pdev->irq;
2197         hdev->roce_base_vector = hdev->base_msi_vector +
2198                                 hdev->roce_base_msix_offset;
2199
2200         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2201                                            sizeof(u16), GFP_KERNEL);
2202         if (!hdev->vector_status) {
2203                 pci_free_irq_vectors(pdev);
2204                 return -ENOMEM;
2205         }
2206
2207         for (i = 0; i < hdev->num_msi; i++)
2208                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2209
2210         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2211                                         sizeof(int), GFP_KERNEL);
2212         if (!hdev->vector_irq) {
2213                 pci_free_irq_vectors(pdev);
2214                 return -ENOMEM;
2215         }
2216
2217         return 0;
2218 }
2219
2220 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2221 {
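             /* only 10M and 100M support half duplex; force full duplex otherwise */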
2222         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2223                 duplex = HCLGE_MAC_FULL;
2224
2225         return duplex;
2226 }
2227
2228 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2229                                       u8 duplex)
2230 {
2231         struct hclge_config_mac_speed_dup_cmd *req;
2232         struct hclge_desc desc;
2233         int ret;
2234
2235         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2236
2237         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2238
2239         if (duplex)
2240                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2241
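             /* hardware speed field encoding: 0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G,
              * 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M
              */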
2242         switch (speed) {
2243         case HCLGE_MAC_SPEED_10M:
2244                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2245                                 HCLGE_CFG_SPEED_S, 6);
2246                 break;
2247         case HCLGE_MAC_SPEED_100M:
2248                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2249                                 HCLGE_CFG_SPEED_S, 7);
2250                 break;
2251         case HCLGE_MAC_SPEED_1G:
2252                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2253                                 HCLGE_CFG_SPEED_S, 0);
2254                 break;
2255         case HCLGE_MAC_SPEED_10G:
2256                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2257                                 HCLGE_CFG_SPEED_S, 1);
2258                 break;
2259         case HCLGE_MAC_SPEED_25G:
2260                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2261                                 HCLGE_CFG_SPEED_S, 2);
2262                 break;
2263         case HCLGE_MAC_SPEED_40G:
2264                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2265                                 HCLGE_CFG_SPEED_S, 3);
2266                 break;
2267         case HCLGE_MAC_SPEED_50G:
2268                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2269                                 HCLGE_CFG_SPEED_S, 4);
2270                 break;
2271         case HCLGE_MAC_SPEED_100G:
2272                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2273                                 HCLGE_CFG_SPEED_S, 5);
2274                 break;
2275         default:
2276                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2277                 return -EINVAL;
2278         }
2279
2280         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2281                       1);
2282
2283         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2284         if (ret) {
2285                 dev_err(&hdev->pdev->dev,
2286                         "mac speed/duplex config cmd failed %d.\n", ret);
2287                 return ret;
2288         }
2289
2290         return 0;
2291 }
2292
2293 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2294 {
2295         int ret;
2296
2297         duplex = hclge_check_speed_dup(duplex, speed);
2298         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2299                 return 0;
2300
2301         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2302         if (ret)
2303                 return ret;
2304
2305         hdev->hw.mac.speed = speed;
2306         hdev->hw.mac.duplex = duplex;
2307
2308         return 0;
2309 }
2310
2311 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2312                                      u8 duplex)
2313 {
2314         struct hclge_vport *vport = hclge_get_vport(handle);
2315         struct hclge_dev *hdev = vport->back;
2316
2317         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2318 }
2319
2320 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2321 {
2322         struct hclge_config_auto_neg_cmd *req;
2323         struct hclge_desc desc;
2324         u32 flag = 0;
2325         int ret;
2326
2327         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2328
2329         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2330         if (enable)
2331                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2332         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2333
2334         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2335         if (ret)
2336                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2337                         ret);
2338
2339         return ret;
2340 }
2341
2342 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2343 {
2344         struct hclge_vport *vport = hclge_get_vport(handle);
2345         struct hclge_dev *hdev = vport->back;
2346
2347         if (!hdev->hw.mac.support_autoneg) {
2348                 if (enable) {
2349                         dev_err(&hdev->pdev->dev,
2350                                 "autoneg is not supported by current port\n");
2351                         return -EOPNOTSUPP;
2352                 } else {
2353                         return 0;
2354                 }
2355         }
2356
2357         return hclge_set_autoneg_en(hdev, enable);
2358 }
2359
2360 static int hclge_get_autoneg(struct hnae3_handle *handle)
2361 {
2362         struct hclge_vport *vport = hclge_get_vport(handle);
2363         struct hclge_dev *hdev = vport->back;
2364         struct phy_device *phydev = hdev->hw.mac.phydev;
2365
2366         if (phydev)
2367                 return phydev->autoneg;
2368
2369         return hdev->hw.mac.autoneg;
2370 }
2371
2372 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2373 {
2374         struct hclge_vport *vport = hclge_get_vport(handle);
2375         struct hclge_dev *hdev = vport->back;
2376         int ret;
2377
2378         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2379
2380         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2381         if (ret)
2382                 return ret;
2383         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2384 }
2385
2386 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2387 {
2388         struct hclge_vport *vport = hclge_get_vport(handle);
2389         struct hclge_dev *hdev = vport->back;
2390
2391         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2392                 return hclge_set_autoneg_en(hdev, !halt);
2393
2394         return 0;
2395 }
2396
2397 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2398 {
2399         struct hclge_config_fec_cmd *req;
2400         struct hclge_desc desc;
2401         int ret;
2402
2403         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2404
2405         req = (struct hclge_config_fec_cmd *)desc.data;
2406         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2407                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2408         if (fec_mode & BIT(HNAE3_FEC_RS))
2409                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2410                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2411         if (fec_mode & BIT(HNAE3_FEC_BASER))
2412                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2413                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2414
2415         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2416         if (ret)
2417                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2418
2419         return ret;
2420 }
2421
2422 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2423 {
2424         struct hclge_vport *vport = hclge_get_vport(handle);
2425         struct hclge_dev *hdev = vport->back;
2426         struct hclge_mac *mac = &hdev->hw.mac;
2427         int ret;
2428
2429         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2430                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2431                 return -EINVAL;
2432         }
2433
2434         ret = hclge_set_fec_hw(hdev, fec_mode);
2435         if (ret)
2436                 return ret;
2437
2438         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2439         return 0;
2440 }
2441
2442 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2443                           u8 *fec_mode)
2444 {
2445         struct hclge_vport *vport = hclge_get_vport(handle);
2446         struct hclge_dev *hdev = vport->back;
2447         struct hclge_mac *mac = &hdev->hw.mac;
2448
2449         if (fec_ability)
2450                 *fec_ability = mac->fec_ability;
2451         if (fec_mode)
2452                 *fec_mode = mac->fec_mode;
2453 }
2454
2455 static int hclge_mac_init(struct hclge_dev *hdev)
2456 {
2457         struct hclge_mac *mac = &hdev->hw.mac;
2458         int ret;
2459
2460         hdev->support_sfp_query = true;
2461         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2462         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2463                                          hdev->hw.mac.duplex);
2464         if (ret) {
2465                 dev_err(&hdev->pdev->dev,
2466                         "Config mac speed dup fail ret=%d\n", ret);
2467                 return ret;
2468         }
2469
2470         if (hdev->hw.mac.support_autoneg) {
2471                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2472                 if (ret) {
2473                         dev_err(&hdev->pdev->dev,
2474                                 "Config mac autoneg fail ret=%d\n", ret);
2475                         return ret;
2476                 }
2477         }
2478
2479         mac->link = 0;
2480
2481         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2482                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2483                 if (ret) {
2484                         dev_err(&hdev->pdev->dev,
2485                                 "Fec mode init fail, ret = %d\n", ret);
2486                         return ret;
2487                 }
2488         }
2489
2490         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2491         if (ret) {
2492                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2493                 return ret;
2494         }
2495
2496         ret = hclge_buffer_alloc(hdev);
2497         if (ret)
2498                 dev_err(&hdev->pdev->dev,
2499                         "allocate buffer fail, ret=%d\n", ret);
2500
2501         return ret;
2502 }
2503
2504 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2505 {
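             /* queue the work on the first CPU in the affinity mask so the
              * mailbox task runs close to the CPU handling the misc interrupt
              */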
2506         if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2507             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2508                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2509                               &hdev->mbx_service_task);
2510 }
2511
2512 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2513 {
2514         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2515             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2516                 queue_work_on(cpumask_first(&hdev->affinity_mask), system_wq,
2517                               &hdev->rst_service_task);
2518 }
2519
2520 static void hclge_task_schedule(struct hclge_dev *hdev)
2521 {
2522         if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2523             !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2524             !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) {
2525                 hdev->hw_stats.stats_timer++;
2526                 hdev->fd_arfs_expire_timer++;
2527                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2528                                     system_wq, &hdev->service_task,
2529                                     round_jiffies_relative(HZ));
2530         }
2531 }
2532
2533 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2534 {
2535         struct hclge_link_status_cmd *req;
2536         struct hclge_desc desc;
2537         int link_status;
2538         int ret;
2539
2540         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2541         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2542         if (ret) {
2543                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2544                         ret);
2545                 return ret;
2546         }
2547
2548         req = (struct hclge_link_status_cmd *)desc.data;
2549         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2550
2551         return !!link_status;
2552 }
2553
2554 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2555 {
2556         unsigned int mac_state;
2557         int link_stat;
2558
2559         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2560                 return 0;
2561
2562         mac_state = hclge_get_mac_link_status(hdev);
2563
2564         if (hdev->hw.mac.phydev) {
2565                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2566                         link_stat = mac_state &
2567                                 hdev->hw.mac.phydev->link;
2568                 else
2569                         link_stat = 0;
2570
2571         } else {
2572                 link_stat = mac_state;
2573         }
2574
2575         return !!link_stat;
2576 }
2577
2578 static void hclge_update_link_status(struct hclge_dev *hdev)
2579 {
2580         struct hnae3_client *rclient = hdev->roce_client;
2581         struct hnae3_client *client = hdev->nic_client;
2582         struct hnae3_handle *rhandle;
2583         struct hnae3_handle *handle;
2584         int state;
2585         int i;
2586
2587         if (!client)
2588                 return;
2589         state = hclge_get_mac_phy_link(hdev);
2590         if (state != hdev->hw.mac.link) {
2591                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2592                         handle = &hdev->vport[i].nic;
2593                         client->ops->link_status_change(handle, state);
2594                         hclge_config_mac_tnl_int(hdev, state);
2595                         rhandle = &hdev->vport[i].roce;
2596                         if (rclient && rclient->ops->link_status_change)
2597                                 rclient->ops->link_status_change(rhandle,
2598                                                                  state);
2599                 }
2600                 hdev->hw.mac.link = state;
2601         }
2602 }
2603
2604 static void hclge_update_port_capability(struct hclge_mac *mac)
2605 {
2606         /* update fec ability by speed */
2607         hclge_convert_setting_fec(mac);
2608
2609         /* firmware cannot identify the backplane type; the media type
2610          * read from the configuration helps to handle it
2611          */
2612         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2613             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2614                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2615         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2616                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2617
2618         if (mac->support_autoneg) {
2619                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2620                 linkmode_copy(mac->advertising, mac->supported);
2621         } else {
2622                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2623                                    mac->supported);
2624                 linkmode_zero(mac->advertising);
2625         }
2626 }
2627
2628 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2629 {
2630         struct hclge_sfp_info_cmd *resp;
2631         struct hclge_desc desc;
2632         int ret;
2633
2634         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2635         resp = (struct hclge_sfp_info_cmd *)desc.data;
2636         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2637         if (ret == -EOPNOTSUPP) {
2638                 dev_warn(&hdev->pdev->dev,
2639                          "IMP do not support get SFP speed %d\n", ret);
2640                 return ret;
2641         } else if (ret) {
2642                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2643                 return ret;
2644         }
2645
2646         *speed = le32_to_cpu(resp->speed);
2647
2648         return 0;
2649 }
2650
2651 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2652 {
2653         struct hclge_sfp_info_cmd *resp;
2654         struct hclge_desc desc;
2655         int ret;
2656
2657         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2658         resp = (struct hclge_sfp_info_cmd *)desc.data;
2659
2660         resp->query_type = QUERY_ACTIVE_SPEED;
2661
2662         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2663         if (ret == -EOPNOTSUPP) {
2664                 dev_warn(&hdev->pdev->dev,
2665                          "IMP does not support get SFP info %d\n", ret);
2666                 return ret;
2667         } else if (ret) {
2668                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2669                 return ret;
2670         }
2671
2672         mac->speed = le32_to_cpu(resp->speed);
2673         /* if resp->speed_ability is 0, the firmware is an old version,
2674          * so do not update these params
2675          */
2676         if (resp->speed_ability) {
2677                 mac->module_type = le32_to_cpu(resp->module_type);
2678                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2679                 mac->autoneg = resp->autoneg;
2680                 mac->support_autoneg = resp->autoneg_ability;
2681                 mac->speed_type = QUERY_ACTIVE_SPEED;
2682                 if (!resp->active_fec)
2683                         mac->fec_mode = 0;
2684                 else
2685                         mac->fec_mode = BIT(resp->active_fec);
2686         } else {
2687                 mac->speed_type = QUERY_SFP_SPEED;
2688         }
2689
2690         return 0;
2691 }
2692
2693 static int hclge_update_port_info(struct hclge_dev *hdev)
2694 {
2695         struct hclge_mac *mac = &hdev->hw.mac;
2696         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2697         int ret;
2698
2699         /* get the port info from SFP cmd if not copper port */
2700         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2701                 return 0;
2702
2703         /* if the IMP does not support querying SFP/qSFP info, return directly */
2704         if (!hdev->support_sfp_query)
2705                 return 0;
2706
2707         if (hdev->pdev->revision >= 0x21)
2708                 ret = hclge_get_sfp_info(hdev, mac);
2709         else
2710                 ret = hclge_get_sfp_speed(hdev, &speed);
2711
2712         if (ret == -EOPNOTSUPP) {
2713                 hdev->support_sfp_query = false;
2714                 return ret;
2715         } else if (ret) {
2716                 return ret;
2717         }
2718
2719         if (hdev->pdev->revision >= 0x21) {
2720                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2721                         hclge_update_port_capability(mac);
2722                         return 0;
2723                 }
2724                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2725                                                HCLGE_MAC_FULL);
2726         } else {
2727                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2728                         return 0; /* do nothing if no SFP */
2729
2730                 /* must config full duplex for SFP */
2731                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2732         }
2733 }
2734
2735 static int hclge_get_status(struct hnae3_handle *handle)
2736 {
2737         struct hclge_vport *vport = hclge_get_vport(handle);
2738         struct hclge_dev *hdev = vport->back;
2739
2740         hclge_update_link_status(hdev);
2741
2742         return hdev->hw.mac.link;
2743 }
2744
2745 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2746 {
2747         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2748
2749         /* fetch the events from their corresponding regs */
2750         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2751         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2752         msix_src_reg = hclge_read_dev(&hdev->hw,
2753                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2754
2755         /* Assumption: if by any chance reset and mailbox events are reported
2756          * together, we will only process the reset event in this pass and
2757          * defer the processing of the mailbox events. Since we would not have
2758          * cleared the RX CMDQ event this time, we would receive another
2759          * interrupt from H/W just for the mailbox.
2760          */
2761
2762         /* check for vector0 reset event sources */
2763         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2764                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2765                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2766                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2767                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2768                 hdev->rst_stats.imp_rst_cnt++;
2769                 return HCLGE_VECTOR0_EVENT_RST;
2770         }
2771
2772         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2773                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2774                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2775                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2776                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2777                 hdev->rst_stats.global_rst_cnt++;
2778                 return HCLGE_VECTOR0_EVENT_RST;
2779         }
2780
2781         /* check for vector0 msix event source */
2782         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2783                 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
2784                          msix_src_reg);
2785                 *clearval = msix_src_reg;
2786                 return HCLGE_VECTOR0_EVENT_ERR;
2787         }
2788
2789         /* check for vector0 mailbox(=CMDQ RX) event source */
2790         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2791                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2792                 *clearval = cmdq_src_reg;
2793                 return HCLGE_VECTOR0_EVENT_MBX;
2794         }
2795
2796         /* print other vector0 event source */
2797         dev_info(&hdev->pdev->dev,
2798                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
2799                  cmdq_src_reg, msix_src_reg);
2800         *clearval = msix_src_reg;
2801
2802         return HCLGE_VECTOR0_EVENT_OTHER;
2803 }
2804
2805 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2806                                     u32 regclr)
2807 {
2808         switch (event_type) {
2809         case HCLGE_VECTOR0_EVENT_RST:
2810                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2811                 break;
2812         case HCLGE_VECTOR0_EVENT_MBX:
2813                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2814                 break;
2815         default:
2816                 break;
2817         }
2818 }
2819
2820 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2821 {
2822         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2823                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2824                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2825                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2826         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2827 }
2828
2829 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2830 {
2831         writel(enable ? 1 : 0, vector->addr);
2832 }
2833
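/* Handler for the misc (vector 0) interrupt: mask the vector, decode the
 * event cause, then schedule the reset or mailbox service task. The cause
 * is cleared and the vector re-enabled here only for mailbox (or zero
 * cause) events; reset and error events defer that to the reset path.
 */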
2834 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2835 {
2836         struct hclge_dev *hdev = data;
2837         u32 clearval = 0;
2838         u32 event_cause;
2839
2840         hclge_enable_vector(&hdev->misc_vector, false);
2841         event_cause = hclge_check_event_cause(hdev, &clearval);
2842
2843         /* vector 0 interrupt is shared with reset and mailbox source events. */
2844         switch (event_cause) {
2845         case HCLGE_VECTOR0_EVENT_ERR:
2846                 /* we do not know what type of reset is required now. This could
2847                  * only be decided after we fetch the type of errors which
2848                  * caused this event. Therefore, we will do the below for now:
2849                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2850                  *    have deferred the choice of reset type to be used.
2851                  * 2. Schedule the reset service task.
2852                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
2853                  *    will fetch the correct type of reset. This would be done
2854                  *    by first decoding the types of errors.
2855                  */
2856                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2857                 /* fall through */
2858         case HCLGE_VECTOR0_EVENT_RST:
2859                 hclge_reset_task_schedule(hdev);
2860                 break;
2861         case HCLGE_VECTOR0_EVENT_MBX:
2862                 /* If we are here then,
2863                  * 1. We are not handling any mbx task and none is
2864                  *    scheduled,
2865                  *                        OR
2866                  * 2. We could be handling an mbx task but nothing more is
2867                  *    scheduled.
2868                  * In both cases, we should schedule the mbx task as there are
2869                  * more mbx messages reported by this interrupt.
2870                  */
2871                 hclge_mbx_task_schedule(hdev);
2872                 break;
2873         default:
2874                 dev_warn(&hdev->pdev->dev,
2875                          "received unknown or unhandled event of vector0\n");
2876                 break;
2877         }
2878
2879         /* clear the source of interrupt if it is not caused by reset */
2880         if (!clearval ||
2881             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2882                 hclge_clear_event_cause(hdev, event_cause, clearval);
2883                 hclge_enable_vector(&hdev->misc_vector, true);
2884         }
2885
2886         return IRQ_HANDLED;
2887 }
2888
2889 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2890 {
2891         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2892                 dev_warn(&hdev->pdev->dev,
2893                          "vector(vector_id %d) has been freed.\n", vector_id);
2894                 return;
2895         }
2896
2897         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2898         hdev->num_msi_left += 1;
2899         hdev->num_msi_used -= 1;
2900 }
2901
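/* Reserve MSI-X vector 0 for the misc interrupt and record its irq number
 * and the address of the register used to enable/disable it.
 */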
2902 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2903 {
2904         struct hclge_misc_vector *vector = &hdev->misc_vector;
2905
2906         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2907
2908         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2909         hdev->vector_status[0] = 0;
2910
2911         hdev->num_msi_left -= 1;
2912         hdev->num_msi_used += 1;
2913 }
2914
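/* Affinity notifier callback for the misc interrupt: cache the new CPU
 * mask in hdev->affinity_mask.
 */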
2915 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
2916                                       const cpumask_t *mask)
2917 {
2918         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
2919                                               affinity_notify);
2920
2921         cpumask_copy(&hdev->affinity_mask, mask);
2922 }
2923
2924 static void hclge_irq_affinity_release(struct kref *ref)
2925 {
2926 }
2927
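/* Apply the cached affinity mask as the affinity hint for the misc
 * interrupt and register a notifier so that user changes to the IRQ
 * affinity are tracked in hdev->affinity_mask.
 */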
2928 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
2929 {
2930         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
2931                               &hdev->affinity_mask);
2932
2933         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
2934         hdev->affinity_notify.release = hclge_irq_affinity_release;
2935         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
2936                                   &hdev->affinity_notify);
2937 }
2938
2939 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
2940 {
2941         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
2942         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
2943 }
2944
2945 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2946 {
2947         int ret;
2948
2949         hclge_get_misc_vector(hdev);
2950
2951         /* this would be explicitly freed in the end */
2952         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2953                           0, "hclge_misc", hdev);
2954         if (ret) {
2955                 hclge_free_vector(hdev, 0);
2956                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2957                         hdev->misc_vector.vector_irq);
2958         }
2959
2960         return ret;
2961 }
2962
2963 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2964 {
2965         free_irq(hdev->misc_vector.vector_irq, hdev);
2966         hclge_free_vector(hdev, 0);
2967 }
2968
2969 int hclge_notify_client(struct hclge_dev *hdev,
2970                         enum hnae3_reset_notify_type type)
2971 {
2972         struct hnae3_client *client = hdev->nic_client;
2973         u16 i;
2974
2975         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
2976                 return 0;
2977
2978         if (!client->ops->reset_notify)
2979                 return -EOPNOTSUPP;
2980
2981         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2982                 struct hnae3_handle *handle = &hdev->vport[i].nic;
2983                 int ret;
2984
2985                 ret = client->ops->reset_notify(handle, type);
2986                 if (ret) {
2987                         dev_err(&hdev->pdev->dev,
2988                                 "notify nic client failed %d(%d)\n", type, ret);
2989                         return ret;
2990                 }
2991         }
2992
2993         return 0;
2994 }
2995
2996 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2997                                     enum hnae3_reset_notify_type type)
2998 {
2999         struct hnae3_client *client = hdev->roce_client;
3000         int ret = 0;
3001         u16 i;
3002
3003         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3004                 return 0;
3005
3006         if (!client->ops->reset_notify)
3007                 return -EOPNOTSUPP;
3008
3009         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3010                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3011
3012                 ret = client->ops->reset_notify(handle, type);
3013                 if (ret) {
3014                         dev_err(&hdev->pdev->dev,
3015                                 "notify roce client failed %d(%d)",
3016                                 type, ret);
3017                         return ret;
3018                 }
3019         }
3020
3021         return ret;
3022 }
3023
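/* Poll for hardware reset completion: check the reset status bit for the
 * current reset type (or the FLR done flag) every 100 ms, for at most 200
 * tries (about 20 seconds), and return -EBUSY on timeout.
 */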
3024 static int hclge_reset_wait(struct hclge_dev *hdev)
3025 {
3026 #define HCLGE_RESET_WATI_MS     100
3027 #define HCLGE_RESET_WAIT_CNT    200
3028         u32 val, reg, reg_bit;
3029         u32 cnt = 0;
3030
3031         switch (hdev->reset_type) {
3032         case HNAE3_IMP_RESET:
3033                 reg = HCLGE_GLOBAL_RESET_REG;
3034                 reg_bit = HCLGE_IMP_RESET_BIT;
3035                 break;
3036         case HNAE3_GLOBAL_RESET:
3037                 reg = HCLGE_GLOBAL_RESET_REG;
3038                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3039                 break;
3040         case HNAE3_FUNC_RESET:
3041                 reg = HCLGE_FUN_RST_ING;
3042                 reg_bit = HCLGE_FUN_RST_ING_B;
3043                 break;
3044         case HNAE3_FLR_RESET:
3045                 break;
3046         default:
3047                 dev_err(&hdev->pdev->dev,
3048                         "Wait for unsupported reset type: %d\n",
3049                         hdev->reset_type);
3050                 return -EINVAL;
3051         }
3052
3053         if (hdev->reset_type == HNAE3_FLR_RESET) {
3054                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3055                        cnt++ < HCLGE_RESET_WAIT_CNT)
3056                         msleep(HCLGE_RESET_WATI_MS);
3057
3058                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3059                         dev_err(&hdev->pdev->dev,
3060                                 "flr wait timeout: %d\n", cnt);
3061                         return -EBUSY;
3062                 }
3063
3064                 return 0;
3065         }
3066
3067         val = hclge_read_dev(&hdev->hw, reg);
3068         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3069                 msleep(HCLGE_RESET_WATI_MS);
3070                 val = hclge_read_dev(&hdev->hw, reg);
3071                 cnt++;
3072         }
3073
3074         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3075                 dev_warn(&hdev->pdev->dev,
3076                          "Wait for reset timeout: %d\n", hdev->reset_type);
3077                 return -EBUSY;
3078         }
3079
3080         return 0;
3081 }
3082
3083 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3084 {
3085         struct hclge_vf_rst_cmd *req;
3086         struct hclge_desc desc;
3087
3088         req = (struct hclge_vf_rst_cmd *)desc.data;
3089         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3090         req->dest_vfid = func_id;
3091
3092         if (reset)
3093                 req->vf_rst = 0x1;
3094
3095         return hclge_cmd_send(&hdev->hw, &desc, 1);
3096 }
3097
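/* Set or clear the FUNC_RST_ING state for every VF vport. When asserting
 * the reset, alive VFs are also informed so they can stop IO; failure to
 * inform a VF (e.g. its driver is not loaded) only produces a warning.
 */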
3098 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3099 {
3100         int i;
3101
3102         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3103                 struct hclge_vport *vport = &hdev->vport[i];
3104                 int ret;
3105
3106                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3107                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3108                 if (ret) {
3109                         dev_err(&hdev->pdev->dev,
3110                                 "set vf(%d) rst failed %d!\n",
3111                                 vport->vport_id, ret);
3112                         return ret;
3113                 }
3114
3115                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3116                         continue;
3117
3118                 /* Inform VF to process the reset.
3119                  * hclge_inform_reset_assert_to_vf may fail if VF
3120                  * driver is not loaded.
3121                  */
3122                 ret = hclge_inform_reset_assert_to_vf(vport);
3123                 if (ret)
3124                         dev_warn(&hdev->pdev->dev,
3125                                  "inform reset to vf(%d) failed %d!\n",
3126                                  vport->vport_id, ret);
3127         }
3128
3129         return 0;
3130 }
3131
3132 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3133 {
3134         struct hclge_desc desc;
3135         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3136         int ret;
3137
3138         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3139         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3140         req->fun_reset_vfid = func_id;
3141
3142         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3143         if (ret)
3144                 dev_err(&hdev->pdev->dev,
3145                         "send function reset cmd fail, status =%d\n", ret);
3146
3147         return ret;
3148 }
3149
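/* Trigger the reset requested in hdev->reset_type: write the global reset
 * bit directly for a global reset, or mark function/FLR resets as pending
 * and reschedule the reset task. Nothing is done while a hardware reset is
 * still in progress.
 */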
3150 static void hclge_do_reset(struct hclge_dev *hdev)
3151 {
3152         struct hnae3_handle *handle = &hdev->vport[0].nic;
3153         struct pci_dev *pdev = hdev->pdev;
3154         u32 val;
3155
3156         if (hclge_get_hw_reset_stat(handle)) {
3157                 dev_info(&pdev->dev, "Hardware reset not finish\n");
3158                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3159                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3160                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3161                 return;
3162         }
3163
3164         switch (hdev->reset_type) {
3165         case HNAE3_GLOBAL_RESET:
3166                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3167                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3168                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3169                 dev_info(&pdev->dev, "Global Reset requested\n");
3170                 break;
3171         case HNAE3_FUNC_RESET:
3172                 dev_info(&pdev->dev, "PF Reset requested\n");
3173                 /* schedule again to check later */
3174                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3175                 hclge_reset_task_schedule(hdev);
3176                 break;
3177         case HNAE3_FLR_RESET:
3178                 dev_info(&pdev->dev, "FLR requested\n");
3179                 /* schedule again to check later */
3180                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3181                 hclge_reset_task_schedule(hdev);
3182                 break;
3183         default:
3184                 dev_warn(&pdev->dev,
3185                          "Unsupported reset type: %d\n", hdev->reset_type);
3186                 break;
3187         }
3188 }
3189
3190 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3191                                                    unsigned long *addr)
3192 {
3193         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3194         struct hclge_dev *hdev = ae_dev->priv;
3195
3196         /* first, resolve any unknown reset type to the known type(s) */
3197         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3198                 /* we will intentionally ignore any errors from this function
3199                  *  as we will end up in *some* reset request in any case
3200                  */
3201                 hclge_handle_hw_msix_error(hdev, addr);
3202                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3203                 /* We defered the clearing of the error event which caused
3204                  * interrupt since it was not posssible to do that in
3205                  * interrupt context (and this is the reason we introduced
3206                  * new UNKNOWN reset type). Now, the errors have been
3207                  * handled and cleared in hardware we can safely enable
3208                  * interrupts. This is an exception to the norm.
3209                  */
3210                 hclge_enable_vector(&hdev->misc_vector, true);
3211         }
3212
3213         /* return the highest priority reset level amongst all */
3214         if (test_bit(HNAE3_IMP_RESET, addr)) {
3215                 rst_level = HNAE3_IMP_RESET;
3216                 clear_bit(HNAE3_IMP_RESET, addr);
3217                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3218                 clear_bit(HNAE3_FUNC_RESET, addr);
3219         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3220                 rst_level = HNAE3_GLOBAL_RESET;
3221                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3222                 clear_bit(HNAE3_FUNC_RESET, addr);
3223         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3224                 rst_level = HNAE3_FUNC_RESET;
3225                 clear_bit(HNAE3_FUNC_RESET, addr);
3226         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3227                 rst_level = HNAE3_FLR_RESET;
3228                 clear_bit(HNAE3_FLR_RESET, addr);
3229         }
3230
3231         if (hdev->reset_type != HNAE3_NONE_RESET &&
3232             rst_level < hdev->reset_type)
3233                 return HNAE3_NONE_RESET;
3234
3235         return rst_level;
3236 }
3237
3238 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3239 {
3240         u32 clearval = 0;
3241
3242         switch (hdev->reset_type) {
3243         case HNAE3_IMP_RESET:
3244                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3245                 break;
3246         case HNAE3_GLOBAL_RESET:
3247                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3248                 break;
3249         default:
3250                 break;
3251         }
3252
3253         if (!clearval)
3254                 return;
3255
3256         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3257         hclge_enable_vector(&hdev->misc_vector, true);
3258 }
3259
3260 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3261 {
3262         int ret = 0;
3263
3264         switch (hdev->reset_type) {
3265         case HNAE3_FUNC_RESET:
3266                 /* fall through */
3267         case HNAE3_FLR_RESET:
3268                 ret = hclge_set_all_vf_rst(hdev, true);
3269                 break;
3270         default:
3271                 break;
3272         }
3273
3274         return ret;
3275 }
3276
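/* Per-reset-type preparation done just before waiting for the hardware:
 * assert the function reset command for a PF reset, flag FLR_DOWN for an
 * FLR, or set the IMP reset interrupt bit in HCLGE_PF_OTHER_INT_REG for an
 * IMP reset, then notify the firmware that the preparatory work is done.
 */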
3277 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3278 {
3279 #define HCLGE_RESET_SYNC_TIME 100
3280
3281         u32 reg_val;
3282         int ret = 0;
3283
3284         switch (hdev->reset_type) {
3285         case HNAE3_FUNC_RESET:
3286                 /* There is no mechanism for PF to know if VF has stopped IO,
3287                  * so for now just wait 100 ms for VF to stop IO
3288                  */
3289                 msleep(HCLGE_RESET_SYNC_TIME);
3290                 ret = hclge_func_reset_cmd(hdev, 0);
3291                 if (ret) {
3292                         dev_err(&hdev->pdev->dev,
3293                                 "asserting function reset fail %d!\n", ret);
3294                         return ret;
3295                 }
3296
3297                 /* After performing PF reset, it is not necessary to do the
3298                  * mailbox handling or send any command to firmware, because
3299                  * any mailbox handling or command to firmware is only valid
3300                  * after hclge_cmd_init is called.
3301                  */
3302                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3303                 hdev->rst_stats.pf_rst_cnt++;
3304                 break;
3305         case HNAE3_FLR_RESET:
3306                 /* There is no mechanism for PF to know if VF has stopped IO,
3307                  * so for now just wait 100 ms for VF to stop IO
3308                  */
3309                 msleep(HCLGE_RESET_SYNC_TIME);
3310                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3311                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3312                 hdev->rst_stats.flr_rst_cnt++;
3313                 break;
3314         case HNAE3_IMP_RESET:
3315                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3316                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3317                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3318                 break;
3319         default:
3320                 break;
3321         }
3322
3323         /* inform hardware that preparatory work is done */
3324         msleep(HCLGE_RESET_SYNC_TIME);
3325         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG,
3326                         HCLGE_NIC_CMQ_ENABLE);
3327         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3328
3329         return ret;
3330 }
3331
3332 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3333 {
3334 #define MAX_RESET_FAIL_CNT 5
3335
3336         if (hdev->reset_pending) {
3337                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3338                          hdev->reset_pending);
3339                 return true;
3340         } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3341                    (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3342                     BIT(HCLGE_IMP_RESET_BIT))) {
3343                 dev_info(&hdev->pdev->dev,
3344                          "reset failed because IMP Reset is pending\n");
3345                 hclge_clear_reset_cause(hdev);
3346                 return false;
3347         } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3348                 hdev->reset_fail_cnt++;
3349                 set_bit(hdev->reset_type, &hdev->reset_pending);
3350                 dev_info(&hdev->pdev->dev,
3351                          "re-schedule reset task(%d)\n",
3352                          hdev->reset_fail_cnt);
3353                 return true;
3354         }
3355
3356         hclge_clear_reset_cause(hdev);
3357         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3358         return false;
3359 }
3360
3361 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3362 {
3363         int ret = 0;
3364
3365         switch (hdev->reset_type) {
3366         case HNAE3_FUNC_RESET:
3367                 /* fall through */
3368         case HNAE3_FLR_RESET:
3369                 ret = hclge_set_all_vf_rst(hdev, false);
3370                 break;
3371         default:
3372                 break;
3373         }
3374
3375         return ret;
3376 }
3377
3378 static int hclge_reset_stack(struct hclge_dev *hdev)
3379 {
3380         int ret;
3381
3382         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3383         if (ret)
3384                 return ret;
3385
3386         ret = hclge_reset_ae_dev(hdev->ae_dev);
3387         if (ret)
3388                 return ret;
3389
3390         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3391         if (ret)
3392                 return ret;
3393
3394         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3395 }
3396
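/* Core reset sequence: bring the RoCE and NIC clients down, prepare and
 * assert the reset, wait for the hardware to finish, then rebuild the ae
 * device and stack and bring the clients back up. On any failure,
 * hclge_reset_err_handle() decides whether to reschedule the reset task.
 */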
3397 static void hclge_reset(struct hclge_dev *hdev)
3398 {
3399         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3400         int ret;
3401
3402         /* Initialize ae_dev reset status as well, in case enet layer wants to
3403          * know if device is undergoing reset
3404          */
3405         ae_dev->reset_type = hdev->reset_type;
3406         hdev->rst_stats.reset_cnt++;
3407         /* perform reset of the stack & ae device for a client */
3408         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3409         if (ret)
3410                 goto err_reset;
3411
3412         ret = hclge_reset_prepare_down(hdev);
3413         if (ret)
3414                 goto err_reset;
3415
3416         rtnl_lock();
3417         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3418         if (ret)
3419                 goto err_reset_lock;
3420
3421         rtnl_unlock();
3422
3423         ret = hclge_reset_prepare_wait(hdev);
3424         if (ret)
3425                 goto err_reset;
3426
3427         if (hclge_reset_wait(hdev))
3428                 goto err_reset;
3429
3430         hdev->rst_stats.hw_reset_done_cnt++;
3431
3432         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3433         if (ret)
3434                 goto err_reset;
3435
3436         rtnl_lock();
3437
3438         ret = hclge_reset_stack(hdev);
3439         if (ret)
3440                 goto err_reset_lock;
3441
3442         hclge_clear_reset_cause(hdev);
3443
3444         ret = hclge_reset_prepare_up(hdev);
3445         if (ret)
3446                 goto err_reset_lock;
3447
3448         rtnl_unlock();
3449
3450         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3451         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3452          * times
3453          */
3454         if (ret && hdev->reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3455                 goto err_reset;
3456
3457         rtnl_lock();
3458
3459         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3460         if (ret)
3461                 goto err_reset_lock;
3462
3463         rtnl_unlock();
3464
3465         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3466         if (ret)
3467                 goto err_reset;
3468
3469         hdev->last_reset_time = jiffies;
3470         hdev->reset_fail_cnt = 0;
3471         hdev->rst_stats.reset_done_cnt++;
3472         ae_dev->reset_type = HNAE3_NONE_RESET;
3473         del_timer(&hdev->reset_timer);
3474
3475         return;
3476
3477 err_reset_lock:
3478         rtnl_unlock();
3479 err_reset:
3480         if (hclge_reset_err_handle(hdev))
3481                 hclge_reset_task_schedule(hdev);
3482 }
3483
3484 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3485 {
3486         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3487         struct hclge_dev *hdev = ae_dev->priv;
3488
3489         /* We might end up getting called broadly because of 2 below cases:
3490          * 1. Recoverable error was conveyed through APEI and only way to bring
3491          *    normalcy is to reset.
3492          * 2. A new reset request from the stack due to timeout
3493          *
3494          * For the first case, the error event might not have an ae handle
3495          * available. Check if this is a new reset request and we are not here
3496          * just because the last reset attempt did not succeed and the watchdog
3497          * hit us again. We will know this if the last reset request did not
3498          * occur very recently (watchdog timer = 5*HZ, let us check after a
3499          * sufficiently large time, say 4*5*HZ). In case of a new request we
3500          * reset the "reset level" to PF reset. And if it is a repeat reset
3501          * request of the most recent one then we want to make sure we throttle
3502          * the reset request; therefore, we will not allow it again before 3*HZ.
3503          */
3504         if (!handle)
3505                 handle = &hdev->vport[0].nic;
3506
3507         if (time_before(jiffies, (hdev->last_reset_time +
3508                                   HCLGE_RESET_INTERVAL)))
3509                 return;
3510         else if (hdev->default_reset_request)
3511                 hdev->reset_level =
3512                         hclge_get_reset_level(ae_dev,
3513                                               &hdev->default_reset_request);
3514         else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3515                 hdev->reset_level = HNAE3_FUNC_RESET;
3516
3517         dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
3518                  hdev->reset_level);
3519
3520         /* request reset & schedule reset task */
3521         set_bit(hdev->reset_level, &hdev->reset_request);
3522         hclge_reset_task_schedule(hdev);
3523
3524         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3525                 hdev->reset_level++;
3526 }
3527
3528 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3529                                         enum hnae3_reset_type rst_type)
3530 {
3531         struct hclge_dev *hdev = ae_dev->priv;
3532
3533         set_bit(rst_type, &hdev->default_reset_request);
3534 }
3535
3536 static void hclge_reset_timer(struct timer_list *t)
3537 {
3538         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3539
3540         dev_info(&hdev->pdev->dev,
3541                  "triggering reset in reset timer\n");
3542         hclge_reset_event(hdev->pdev, NULL);
3543 }
3544
3545 static void hclge_reset_subtask(struct hclge_dev *hdev)
3546 {
3547         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3548
3549         /* check if there is any ongoing reset in the hardware. This status can
3550          * be checked from reset_pending. If there is, we need to wait for the
3551          * hardware to complete the reset.
3552          *    a. If we are able to figure out in reasonable time that hardware
3553          *       has fully reset, then we can proceed with the driver and client
3554          *       reset.
3555          *    b. else, we can come back later to check this status so re-sched
3556          *       now.
3557          */
3558         hdev->last_reset_time = jiffies;
3559         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3560         if (hdev->reset_type != HNAE3_NONE_RESET)
3561                 hclge_reset(hdev);
3562
3563         /* check if we got any *new* reset requests to be honored */
3564         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3565         if (hdev->reset_type != HNAE3_NONE_RESET)
3566                 hclge_do_reset(hdev);
3567
3568         hdev->reset_type = HNAE3_NONE_RESET;
3569 }
3570
3571 static void hclge_reset_service_task(struct work_struct *work)
3572 {
3573         struct hclge_dev *hdev =
3574                 container_of(work, struct hclge_dev, rst_service_task);
3575
3576         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3577                 return;
3578
3579         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3580
3581         hclge_reset_subtask(hdev);
3582
3583         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3584 }
3585
3586 static void hclge_mailbox_service_task(struct work_struct *work)
3587 {
3588         struct hclge_dev *hdev =
3589                 container_of(work, struct hclge_dev, mbx_service_task);
3590
3591         if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3592                 return;
3593
3594         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3595
3596         hclge_mbx_handler(hdev);
3597
3598         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3599 }
3600
3601 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3602 {
3603         int i;
3604
3605         /* start from vport 1, because the PF (vport 0) is always alive */
3606         for (i = 1; i < hdev->num_alloc_vport; i++) {
3607                 struct hclge_vport *vport = &hdev->vport[i];
3608
3609                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3610                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3611
3612                 /* If vf is not alive, set to default value */
3613                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3614                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3615         }
3616 }
3617
3618 static void hclge_service_task(struct work_struct *work)
3619 {
3620         struct hclge_dev *hdev =
3621                 container_of(work, struct hclge_dev, service_task.work);
3622
3623         clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
3624
3625         if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3626                 hclge_update_stats_for_all(hdev);
3627                 hdev->hw_stats.stats_timer = 0;
3628         }
3629
3630         hclge_update_port_info(hdev);
3631         hclge_update_link_status(hdev);
3632         hclge_update_vport_alive(hdev);
3633         hclge_sync_vlan_filter(hdev);
3634         if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) {
3635                 hclge_rfs_filter_expire(hdev);
3636                 hdev->fd_arfs_expire_timer = 0;
3637         }
3638
3639         hclge_task_schedule(hdev);
3640 }
3641
3642 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3643 {
3644         /* VF handle has no client */
3645         if (!handle->client)
3646                 return container_of(handle, struct hclge_vport, nic);
3647         else if (handle->client->type == HNAE3_CLIENT_ROCE)
3648                 return container_of(handle, struct hclge_vport, roce);
3649         else
3650                 return container_of(handle, struct hclge_vport, nic);
3651 }
3652
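/* Allocate up to vector_num unused MSI-X vectors for a vport. Vector 0 is
 * reserved for the misc interrupt, so the search starts at index 1. Each
 * entry of vector_info gets the irq number and the per-vport interrupt
 * register address; the return value is how many vectors were allocated.
 */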
3653 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3654                             struct hnae3_vector_info *vector_info)
3655 {
3656         struct hclge_vport *vport = hclge_get_vport(handle);
3657         struct hnae3_vector_info *vector = vector_info;
3658         struct hclge_dev *hdev = vport->back;
3659         int alloc = 0;
3660         int i, j;
3661
3662         vector_num = min(hdev->num_msi_left, vector_num);
3663
3664         for (j = 0; j < vector_num; j++) {
3665                 for (i = 1; i < hdev->num_msi; i++) {
3666                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3667                                 vector->vector = pci_irq_vector(hdev->pdev, i);
3668                                 vector->io_addr = hdev->hw.io_base +
3669                                         HCLGE_VECTOR_REG_BASE +
3670                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3671                                         vport->vport_id *
3672                                         HCLGE_VECTOR_VF_OFFSET;
3673                                 hdev->vector_status[i] = vport->vport_id;
3674                                 hdev->vector_irq[i] = vector->vector;
3675
3676                                 vector++;
3677                                 alloc++;
3678
3679                                 break;
3680                         }
3681                 }
3682         }
3683         hdev->num_msi_left -= alloc;
3684         hdev->num_msi_used += alloc;
3685
3686         return alloc;
3687 }
3688
3689 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3690 {
3691         int i;
3692
3693         for (i = 0; i < hdev->num_msi; i++)
3694                 if (vector == hdev->vector_irq[i])
3695                         return i;
3696
3697         return -EINVAL;
3698 }
3699
3700 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3701 {
3702         struct hclge_vport *vport = hclge_get_vport(handle);
3703         struct hclge_dev *hdev = vport->back;
3704         int vector_id;
3705
3706         vector_id = hclge_get_vector_index(hdev, vector);
3707         if (vector_id < 0) {
3708                 dev_err(&hdev->pdev->dev,
3709                         "Get vector index fail. vector_id =%d\n", vector_id);
3710                 return vector_id;
3711         }
3712
3713         hclge_free_vector(hdev, vector_id);
3714
3715         return 0;
3716 }
3717
3718 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3719 {
3720         return HCLGE_RSS_KEY_SIZE;
3721 }
3722
3723 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3724 {
3725         return HCLGE_RSS_IND_TBL_SIZE;
3726 }
3727
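/* Program the RSS hash algorithm and hash key. The key is written in
 * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per
 * chunk, with the chunk index carried in the hash_config field.
 */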
3728 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3729                                   const u8 hfunc, const u8 *key)
3730 {
3731         struct hclge_rss_config_cmd *req;
3732         unsigned int key_offset = 0;
3733         struct hclge_desc desc;
3734         int key_counts;
3735         int key_size;
3736         int ret;
3737
3738         key_counts = HCLGE_RSS_KEY_SIZE;
3739         req = (struct hclge_rss_config_cmd *)desc.data;
3740
3741         while (key_counts) {
3742                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3743                                            false);
3744
3745                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3746                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3747
3748                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
3749                 memcpy(req->hash_key,
3750                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3751
3752                 key_counts -= key_size;
3753                 key_offset++;
3754                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3755                 if (ret) {
3756                         dev_err(&hdev->pdev->dev,
3757                                 "Configure RSS config fail, status = %d\n",
3758                                 ret);
3759                         return ret;
3760                 }
3761         }
3762         return 0;
3763 }
3764
3765 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3766 {
3767         struct hclge_rss_indirection_table_cmd *req;
3768         struct hclge_desc desc;
3769         int i, j;
3770         int ret;
3771
3772         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3773
3774         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3775                 hclge_cmd_setup_basic_desc
3776                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3777
3778                 req->start_table_index =
3779                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3780                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3781
3782                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3783                         req->rss_result[j] =
3784                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3785
3786                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3787                 if (ret) {
3788                         dev_err(&hdev->pdev->dev,
3789                                 "Configure rss indir table fail,status = %d\n",
3790                                 ret);
3791                         return ret;
3792                 }
3793         }
3794         return 0;
3795 }
3796
3797 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3798                                  u16 *tc_size, u16 *tc_offset)
3799 {
3800         struct hclge_rss_tc_mode_cmd *req;
3801         struct hclge_desc desc;
3802         int ret;
3803         int i;
3804
3805         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3806         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3807
3808         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3809                 u16 mode = 0;
3810
3811                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3812                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3813                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3814                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3815                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3816
3817                 req->rss_tc_mode[i] = cpu_to_le16(mode);
3818         }
3819
3820         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3821         if (ret)
3822                 dev_err(&hdev->pdev->dev,
3823                         "Configure rss tc mode fail, status = %d\n", ret);
3824
3825         return ret;
3826 }
3827
3828 static void hclge_get_rss_type(struct hclge_vport *vport)
3829 {
3830         if (vport->rss_tuple_sets.ipv4_tcp_en ||
3831             vport->rss_tuple_sets.ipv4_udp_en ||
3832             vport->rss_tuple_sets.ipv4_sctp_en ||
3833             vport->rss_tuple_sets.ipv6_tcp_en ||
3834             vport->rss_tuple_sets.ipv6_udp_en ||
3835             vport->rss_tuple_sets.ipv6_sctp_en)
3836                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3837         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3838                  vport->rss_tuple_sets.ipv6_fragment_en)
3839                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3840         else
3841                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3842 }
3843
3844 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3845 {
3846         struct hclge_rss_input_tuple_cmd *req;
3847         struct hclge_desc desc;
3848         int ret;
3849
3850         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3851
3852         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3853
3854         /* Get the tuple cfg from pf */
3855         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3856         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3857         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3858         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3859         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3860         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3861         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3862         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3863         hclge_get_rss_type(&hdev->vport[0]);
3864         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3865         if (ret)
3866                 dev_err(&hdev->pdev->dev,
3867                         "Configure rss input fail, status = %d\n", ret);
3868         return ret;
3869 }
3870
3871 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3872                          u8 *key, u8 *hfunc)
3873 {
3874         struct hclge_vport *vport = hclge_get_vport(handle);
3875         int i;
3876
3877         /* Get hash algorithm */
3878         if (hfunc) {
3879                 switch (vport->rss_algo) {
3880                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3881                         *hfunc = ETH_RSS_HASH_TOP;
3882                         break;
3883                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3884                         *hfunc = ETH_RSS_HASH_XOR;
3885                         break;
3886                 default:
3887                         *hfunc = ETH_RSS_HASH_UNKNOWN;
3888                         break;
3889                 }
3890         }
3891
3892         /* Get the RSS Key required by the user */
3893         if (key)
3894                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3895
3896         /* Get indirect table */
3897         if (indir)
3898                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3899                         indir[i] =  vport->rss_indirection_tbl[i];
3900
3901         return 0;
3902 }
3903
3904 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3905                          const  u8 *key, const  u8 hfunc)
3906 {
3907         struct hclge_vport *vport = hclge_get_vport(handle);
3908         struct hclge_dev *hdev = vport->back;
3909         u8 hash_algo;
3910         int ret, i;
3911
3912         /* Set the RSS Hash Key if specified by the user */
3913         if (key) {
3914                 switch (hfunc) {
3915                 case ETH_RSS_HASH_TOP:
3916                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3917                         break;
3918                 case ETH_RSS_HASH_XOR:
3919                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3920                         break;
3921                 case ETH_RSS_HASH_NO_CHANGE:
3922                         hash_algo = vport->rss_algo;
3923                         break;
3924                 default:
3925                         return -EINVAL;
3926                 }
3927
3928                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3929                 if (ret)
3930                         return ret;
3931
3932                 /* Update the shadow RSS key with the user specified key */
3933                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3934                 vport->rss_algo = hash_algo;
3935         }
3936
3937         /* Update the shadow RSS table with user specified qids */
3938         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3939                 vport->rss_indirection_tbl[i] = indir[i];
3940
3941         /* Update the hardware */
3942         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
3943 }
3944
3945 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3946 {
3947         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3948
3949         if (nfc->data & RXH_L4_B_2_3)
3950                 hash_sets |= HCLGE_D_PORT_BIT;
3951         else
3952                 hash_sets &= ~HCLGE_D_PORT_BIT;
3953
3954         if (nfc->data & RXH_IP_SRC)
3955                 hash_sets |= HCLGE_S_IP_BIT;
3956         else
3957                 hash_sets &= ~HCLGE_S_IP_BIT;
3958
3959         if (nfc->data & RXH_IP_DST)
3960                 hash_sets |= HCLGE_D_IP_BIT;
3961         else
3962                 hash_sets &= ~HCLGE_D_IP_BIT;
3963
3964         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3965                 hash_sets |= HCLGE_V_TAG_BIT;
3966
3967         return hash_sets;
3968 }
3969
3970 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3971                                struct ethtool_rxnfc *nfc)
3972 {
3973         struct hclge_vport *vport = hclge_get_vport(handle);
3974         struct hclge_dev *hdev = vport->back;
3975         struct hclge_rss_input_tuple_cmd *req;
3976         struct hclge_desc desc;
3977         u8 tuple_sets;
3978         int ret;
3979
3980         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3981                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
3982                 return -EINVAL;
3983
3984         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3985         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3986
3987         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3988         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3989         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3990         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3991         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3992         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3993         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3994         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3995
3996         tuple_sets = hclge_get_rss_hash_bits(nfc);
3997         switch (nfc->flow_type) {
3998         case TCP_V4_FLOW:
3999                 req->ipv4_tcp_en = tuple_sets;
4000                 break;
4001         case TCP_V6_FLOW:
4002                 req->ipv6_tcp_en = tuple_sets;
4003                 break;
4004         case UDP_V4_FLOW:
4005                 req->ipv4_udp_en = tuple_sets;
4006                 break;
4007         case UDP_V6_FLOW:
4008                 req->ipv6_udp_en = tuple_sets;
4009                 break;
4010         case SCTP_V4_FLOW:
4011                 req->ipv4_sctp_en = tuple_sets;
4012                 break;
4013         case SCTP_V6_FLOW:
4014                 if ((nfc->data & RXH_L4_B_0_1) ||
4015                     (nfc->data & RXH_L4_B_2_3))
4016                         return -EINVAL;
4017
4018                 req->ipv6_sctp_en = tuple_sets;
4019                 break;
4020         case IPV4_FLOW:
4021                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4022                 break;
4023         case IPV6_FLOW:
4024                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4025                 break;
4026         default:
4027                 return -EINVAL;
4028         }
4029
4030         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4031         if (ret) {
4032                 dev_err(&hdev->pdev->dev,
4033                         "Set rss tuple fail, status = %d\n", ret);
4034                 return ret;
4035         }
4036
4037         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4038         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4039         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4040         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4041         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4042         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4043         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4044         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4045         hclge_get_rss_type(vport);
4046         return 0;
4047 }
4048
4049 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4050                                struct ethtool_rxnfc *nfc)
4051 {
4052         struct hclge_vport *vport = hclge_get_vport(handle);
4053         u8 tuple_sets;
4054
4055         nfc->data = 0;
4056
4057         switch (nfc->flow_type) {
4058         case TCP_V4_FLOW:
4059                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4060                 break;
4061         case UDP_V4_FLOW:
4062                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4063                 break;
4064         case TCP_V6_FLOW:
4065                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4066                 break;
4067         case UDP_V6_FLOW:
4068                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4069                 break;
4070         case SCTP_V4_FLOW:
4071                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4072                 break;
4073         case SCTP_V6_FLOW:
4074                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4075                 break;
4076         case IPV4_FLOW:
4077         case IPV6_FLOW:
4078                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4079                 break;
4080         default:
4081                 return -EINVAL;
4082         }
4083
4084         if (!tuple_sets)
4085                 return 0;
4086
4087         if (tuple_sets & HCLGE_D_PORT_BIT)
4088                 nfc->data |= RXH_L4_B_2_3;
4089         if (tuple_sets & HCLGE_S_PORT_BIT)
4090                 nfc->data |= RXH_L4_B_0_1;
4091         if (tuple_sets & HCLGE_D_IP_BIT)
4092                 nfc->data |= RXH_IP_DST;
4093         if (tuple_sets & HCLGE_S_IP_BIT)
4094                 nfc->data |= RXH_IP_SRC;
4095
4096         return 0;
4097 }
4098
4099 static int hclge_get_tc_size(struct hnae3_handle *handle)
4100 {
4101         struct hclge_vport *vport = hclge_get_vport(handle);
4102         struct hclge_dev *hdev = vport->back;
4103
4104         return hdev->rss_size_max;
4105 }
4106
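/* Push vport 0's RSS configuration to hardware: the indirection table,
 * hash key and algorithm, input tuple settings and the per-TC mode, where
 * tc_size is the log2 of rss_size rounded up to a power of two.
 */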
4107 int hclge_rss_init_hw(struct hclge_dev *hdev)
4108 {
4109         struct hclge_vport *vport = hdev->vport;
4110         u8 *rss_indir = vport[0].rss_indirection_tbl;
4111         u16 rss_size = vport[0].alloc_rss_size;
4112         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4113         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4114         u8 *key = vport[0].rss_hash_key;
4115         u8 hfunc = vport[0].rss_algo;
4116         u16 tc_valid[HCLGE_MAX_TC_NUM];
4117         u16 roundup_size;
4118         unsigned int i;
4119         int ret;
4120
4121         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4122         if (ret)
4123                 return ret;
4124
4125         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4126         if (ret)
4127                 return ret;
4128
4129         ret = hclge_set_rss_input_tuple(hdev);
4130         if (ret)
4131                 return ret;
4132
4133         /* Each TC has the same queue size, and the tc_size set to hardware is
4134          * the log2 of the roundup power of two of rss_size; the actual queue
4135          * size is limited by the indirection table.
4136          */
4137         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4138                 dev_err(&hdev->pdev->dev,
4139                         "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4140                         rss_size);
4141                 return -EINVAL;
4142         }
4143
4144         roundup_size = roundup_pow_of_two(rss_size);
4145         roundup_size = ilog2(roundup_size);
4146
4147         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4148                 tc_valid[i] = 0;
4149
4150                 if (!(hdev->hw_tc_map & BIT(i)))
4151                         continue;
4152
4153                 tc_valid[i] = 1;
4154                 tc_size[i] = roundup_size;
4155                 tc_offset[i] = rss_size * i;
4156         }
4157
4158         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4159 }
4160
4161 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4162 {
4163         struct hclge_vport *vport = hdev->vport;
4164         int i, j;
4165
4166         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4167                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4168                         vport[j].rss_indirection_tbl[i] =
4169                                 i % vport[j].alloc_rss_size;
4170         }
4171 }
4172
4173 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4174 {
4175         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4176         struct hclge_vport *vport = hdev->vport;
4177
4178         if (hdev->pdev->revision >= 0x21)
4179                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4180
4181         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4182                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4183                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4184                 vport[i].rss_tuple_sets.ipv4_udp_en =
4185                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4186                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4187                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4188                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4189                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4190                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4191                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4192                 vport[i].rss_tuple_sets.ipv6_udp_en =
4193                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4194                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4195                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4196                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4197                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4198
4199                 vport[i].rss_algo = rss_algo;
4200
4201                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4202                        HCLGE_RSS_KEY_SIZE);
4203         }
4204
4205         hclge_rss_indir_init_cfg(hdev);
4206 }
4207
4208 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4209                                 int vector_id, bool en,
4210                                 struct hnae3_ring_chain_node *ring_chain)
4211 {
4212         struct hclge_dev *hdev = vport->back;
4213         struct hnae3_ring_chain_node *node;
4214         struct hclge_desc desc;
4215         struct hclge_ctrl_vector_chain_cmd *req
4216                 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4217         enum hclge_cmd_status status;
4218         enum hclge_opcode_type op;
4219         u16 tqp_type_and_id;
4220         int i;
4221
4222         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4223         hclge_cmd_setup_basic_desc(&desc, op, false);
4224         req->int_vector_id = vector_id;
4225
4226         i = 0;
4227         for (node = ring_chain; node; node = node->next) {
4228                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4229                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4230                                 HCLGE_INT_TYPE_S,
4231                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4232                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4233                                 HCLGE_TQP_ID_S, node->tqp_index);
4234                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4235                                 HCLGE_INT_GL_IDX_S,
4236                                 hnae3_get_field(node->int_gl_idx,
4237                                                 HNAE3_RING_GL_IDX_M,
4238                                                 HNAE3_RING_GL_IDX_S));
4239                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4240                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4241                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4242                         req->vfid = vport->vport_id;
4243
4244                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4245                         if (status) {
4246                                 dev_err(&hdev->pdev->dev,
4247                                         "Map TQP fail, status is %d.\n",
4248                                         status);
4249                                 return -EIO;
4250                         }
4251                         i = 0;
4252
4253                         hclge_cmd_setup_basic_desc(&desc,
4254                                                    op,
4255                                                    false);
4256                         req->int_vector_id = vector_id;
4257                 }
4258         }
4259
4260         if (i > 0) {
4261                 req->int_cause_num = i;
4262                 req->vfid = vport->vport_id;
4263                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4264                 if (status) {
4265                         dev_err(&hdev->pdev->dev,
4266                                 "Map TQP fail, status is %d.\n", status);
4267                         return -EIO;
4268                 }
4269         }
4270
4271         return 0;
4272 }
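/* Usage sketch (illustrative only, not part of the driver): how a caller
 * could map a single RX ring to a vector whose index was obtained via
 * hclge_get_vector_index(). The HNAE3_RING_TYPE_* / HNAE3_RING_GL_* values
 * and the hnae3_ring_chain_node fields are assumed from hnae3.h; the helper
 * name itself is hypothetical.
 */
static int __maybe_unused hclge_bind_one_rx_ring_sketch(struct hclge_vport *vport,
							 int vector_id,
							 u16 tqp_index)
{
	struct hnae3_ring_chain_node node;

	memset(&node, 0, sizeof(node));

	/* mark the node as an RX ring and point it at the wanted TQP */
	hnae3_set_bit(node.flag, HNAE3_RING_TYPE_B, HNAE3_RING_TYPE_RX);
	node.tqp_index = tqp_index;

	/* select the RX GL index used for interrupt coalescing */
	hnae3_set_field(node.int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
	node.next = NULL;

	/* a one-element chain fits in a single command descriptor */
	return hclge_bind_ring_with_vector(vport, vector_id, true, &node);
}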
4273
4274 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4275                                     struct hnae3_ring_chain_node *ring_chain)
4276 {
4277         struct hclge_vport *vport = hclge_get_vport(handle);
4278         struct hclge_dev *hdev = vport->back;
4279         int vector_id;
4280
4281         vector_id = hclge_get_vector_index(hdev, vector);
4282         if (vector_id < 0) {
4283                 dev_err(&hdev->pdev->dev,
4284                         "Get vector index fail. vector_id =%d\n", vector_id);
4285                 return vector_id;
4286         }
4287
4288         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4289 }
4290
4291 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4292                                        struct hnae3_ring_chain_node *ring_chain)
4293 {
4294         struct hclge_vport *vport = hclge_get_vport(handle);
4295         struct hclge_dev *hdev = vport->back;
4296         int vector_id, ret;
4297
4298         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4299                 return 0;
4300
4301         vector_id = hclge_get_vector_index(hdev, vector);
4302         if (vector_id < 0) {
4303                 dev_err(&handle->pdev->dev,
4304                         "Get vector index fail. ret =%d\n", vector_id);
4305                 return vector_id;
4306         }
4307
4308         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4309         if (ret)
4310                 dev_err(&handle->pdev->dev,
4311                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4312                         vector_id, ret);
4313
4314         return ret;
4315 }
4316
4317 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4318                                struct hclge_promisc_param *param)
4319 {
4320         struct hclge_promisc_cfg_cmd *req;
4321         struct hclge_desc desc;
4322         int ret;
4323
4324         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4325
4326         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4327         req->vf_id = param->vf_id;
4328
4329         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4330          * pdev revision(0x20); newer revisions support them. Setting these
4331          * two fields does not cause an error when the driver sends the
4332          * command to the firmware on revision(0x20).
4333          */
4334         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4335                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4336
4337         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4338         if (ret)
4339                 dev_err(&hdev->pdev->dev,
4340                         "Set promisc mode fail, status is %d.\n", ret);
4341
4342         return ret;
4343 }
4344
4345 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4346                               bool en_mc, bool en_bc, int vport_id)
4347 {
4348         if (!param)
4349                 return;
4350
4351         memset(param, 0, sizeof(struct hclge_promisc_param));
4352         if (en_uc)
4353                 param->enable = HCLGE_PROMISC_EN_UC;
4354         if (en_mc)
4355                 param->enable |= HCLGE_PROMISC_EN_MC;
4356         if (en_bc)
4357                 param->enable |= HCLGE_PROMISC_EN_BC;
4358         param->vf_id = vport_id;
4359 }
4360
4361 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4362                                   bool en_mc_pmc)
4363 {
4364         struct hclge_vport *vport = hclge_get_vport(handle);
4365         struct hclge_dev *hdev = vport->back;
4366         struct hclge_promisc_param param;
4367         bool en_bc_pmc = true;
4368
4369         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4370          * is always bypassed. So broadcast promisc should stay disabled until
4371          * the user enables promisc mode.
4372          */
4373         if (handle->pdev->revision == 0x20)
4374                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4375
4376         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4377                                  vport->vport_id);
4378         return hclge_cmd_set_promisc_mode(hdev, &param);
4379 }
4380
4381 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4382 {
4383         struct hclge_get_fd_mode_cmd *req;
4384         struct hclge_desc desc;
4385         int ret;
4386
4387         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4388
4389         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4390
4391         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4392         if (ret) {
4393                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4394                 return ret;
4395         }
4396
4397         *fd_mode = req->mode;
4398
4399         return ret;
4400 }
4401
4402 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4403                                    u32 *stage1_entry_num,
4404                                    u32 *stage2_entry_num,
4405                                    u16 *stage1_counter_num,
4406                                    u16 *stage2_counter_num)
4407 {
4408         struct hclge_get_fd_allocation_cmd *req;
4409         struct hclge_desc desc;
4410         int ret;
4411
4412         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4413
4414         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4415
4416         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4417         if (ret) {
4418                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4419                         ret);
4420                 return ret;
4421         }
4422
4423         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4424         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4425         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4426         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4427
4428         return ret;
4429 }
4430
4431 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4432 {
4433         struct hclge_set_fd_key_config_cmd *req;
4434         struct hclge_fd_key_cfg *stage;
4435         struct hclge_desc desc;
4436         int ret;
4437
4438         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4439
4440         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4441         stage = &hdev->fd_cfg.key_cfg[stage_num];
4442         req->stage = stage_num;
4443         req->key_select = stage->key_sel;
4444         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4445         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4446         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4447         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4448         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4449         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4450
4451         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4452         if (ret)
4453                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4454
4455         return ret;
4456 }
4457
4458 static int hclge_init_fd_config(struct hclge_dev *hdev)
4459 {
4460 #define LOW_2_WORDS             0x03
4461         struct hclge_fd_key_cfg *key_cfg;
4462         int ret;
4463
4464         if (!hnae3_dev_fd_supported(hdev))
4465                 return 0;
4466
4467         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4468         if (ret)
4469                 return ret;
4470
4471         switch (hdev->fd_cfg.fd_mode) {
4472         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4473                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4474                 break;
4475         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4476                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4477                 break;
4478         default:
4479                 dev_err(&hdev->pdev->dev,
4480                         "Unsupported flow director mode %d\n",
4481                         hdev->fd_cfg.fd_mode);
4482                 return -EOPNOTSUPP;
4483         }
4484
4485         hdev->fd_cfg.proto_support =
4486                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4487                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4488         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4489         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4490         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4491         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4492         key_cfg->outer_sipv6_word_en = 0;
4493         key_cfg->outer_dipv6_word_en = 0;
4494
4495         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4496                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4497                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4498                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4499
4500         /* If the max 400-bit key is used, we can also support ether type tuples */
4501         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4502                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4503                 key_cfg->tuple_active |=
4504                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4505         }
4506
4507         /* roce_type is used to filter roce frames
4508          * dst_vport is used to specify the rule
4509          */
4510         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4511
4512         ret = hclge_get_fd_allocation(hdev,
4513                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4514                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4515                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4516                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4517         if (ret)
4518                 return ret;
4519
4520         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4521 }
4522
4523 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4524                                 int loc, u8 *key, bool is_add)
4525 {
4526         struct hclge_fd_tcam_config_1_cmd *req1;
4527         struct hclge_fd_tcam_config_2_cmd *req2;
4528         struct hclge_fd_tcam_config_3_cmd *req3;
4529         struct hclge_desc desc[3];
4530         int ret;
4531
4532         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4533         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4534         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4535         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4536         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4537
4538         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4539         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4540         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4541
4542         req1->stage = stage;
4543         req1->xy_sel = sel_x ? 1 : 0;
4544         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4545         req1->index = cpu_to_le32(loc);
4546         req1->entry_vld = sel_x ? is_add : 0;
4547
4548         if (key) {
4549                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4550                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4551                        sizeof(req2->tcam_data));
4552                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4553                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4554         }
4555
4556         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4557         if (ret)
4558                 dev_err(&hdev->pdev->dev,
4559                         "config tcam key fail, ret=%d\n",
4560                         ret);
4561
4562         return ret;
4563 }
4564
4565 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4566                               struct hclge_fd_ad_data *action)
4567 {
4568         struct hclge_fd_ad_config_cmd *req;
4569         struct hclge_desc desc;
4570         u64 ad_data = 0;
4571         int ret;
4572
4573         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4574
4575         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4576         req->index = cpu_to_le32(loc);
4577         req->stage = stage;
4578
4579         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4580                       action->write_rule_id_to_bd);
4581         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4582                         action->rule_id);
4583         ad_data <<= 32;
4584         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4585         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4586                       action->forward_to_direct_queue);
4587         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4588                         action->queue_id);
4589         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4590         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4591                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4592         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4593         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4594                         action->next_input_key);
4595
4596         req->ad_data = cpu_to_le64(ad_data);
4597         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4598         if (ret)
4599                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
4600
4601         return ret;
4602 }
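/* Layout note (derived from the code above): because of the "ad_data <<= 32",
 * the rule-id fields (HCLGE_FD_AD_WR_RULE_ID_B, HCLGE_FD_AD_RULE_ID_M) end up
 * in bits 63..32 of ad_data, while the action fields (drop, direct queue id,
 * counter, next stage) occupy bits 31..0 before the whole value is written as
 * a little-endian u64.
 */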
4603
4604 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4605                                    struct hclge_fd_rule *rule)
4606 {
4607         u16 tmp_x_s, tmp_y_s;
4608         u32 tmp_x_l, tmp_y_l;
4609         int i;
4610
4611         if (rule->unused_tuple & tuple_bit)
4612                 return true;
4613
4614         switch (tuple_bit) {
4615         case 0:
4616                 return false;
4617         case BIT(INNER_DST_MAC):
4618                 for (i = 0; i < ETH_ALEN; i++) {
4619                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4620                                rule->tuples_mask.dst_mac[i]);
4621                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
4622                                rule->tuples_mask.dst_mac[i]);
4623                 }
4624
4625                 return true;
4626         case BIT(INNER_SRC_MAC):
4627                 for (i = 0; i < ETH_ALEN; i++) {
4628                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4629                                rule->tuples_mask.src_mac[i]);
4630                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
4631                                rule->tuples_mask.src_mac[i]);
4632                 }
4633
4634                 return true;
4635         case BIT(INNER_VLAN_TAG_FST):
4636                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4637                        rule->tuples_mask.vlan_tag1);
4638                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4639                        rule->tuples_mask.vlan_tag1);
4640                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4641                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4642
4643                 return true;
4644         case BIT(INNER_ETH_TYPE):
4645                 calc_x(tmp_x_s, rule->tuples.ether_proto,
4646                        rule->tuples_mask.ether_proto);
4647                 calc_y(tmp_y_s, rule->tuples.ether_proto,
4648                        rule->tuples_mask.ether_proto);
4649                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4650                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4651
4652                 return true;
4653         case BIT(INNER_IP_TOS):
4654                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4655                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4656
4657                 return true;
4658         case BIT(INNER_IP_PROTO):
4659                 calc_x(*key_x, rule->tuples.ip_proto,
4660                        rule->tuples_mask.ip_proto);
4661                 calc_y(*key_y, rule->tuples.ip_proto,
4662                        rule->tuples_mask.ip_proto);
4663
4664                 return true;
4665         case BIT(INNER_SRC_IP):
4666                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
4667                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4668                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
4669                        rule->tuples_mask.src_ip[IPV4_INDEX]);
4670                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4671                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4672
4673                 return true;
4674         case BIT(INNER_DST_IP):
4675                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
4676                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4677                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
4678                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
4679                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4680                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4681
4682                 return true;
4683         case BIT(INNER_SRC_PORT):
4684                 calc_x(tmp_x_s, rule->tuples.src_port,
4685                        rule->tuples_mask.src_port);
4686                 calc_y(tmp_y_s, rule->tuples.src_port,
4687                        rule->tuples_mask.src_port);
4688                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4689                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4690
4691                 return true;
4692         case BIT(INNER_DST_PORT):
4693                 calc_x(tmp_x_s, rule->tuples.dst_port,
4694                        rule->tuples_mask.dst_port);
4695                 calc_y(tmp_y_s, rule->tuples.dst_port,
4696                        rule->tuples_mask.dst_port);
4697                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4698                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4699
4700                 return true;
4701         default:
4702                 return false;
4703         }
4704 }
4705
4706 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4707                                  u8 vf_id, u8 network_port_id)
4708 {
4709         u32 port_number = 0;
4710
4711         if (port_type == HOST_PORT) {
4712                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4713                                 pf_id);
4714                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4715                                 vf_id);
4716                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4717         } else {
4718                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4719                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4720                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4721         }
4722
4723         return port_number;
4724 }
4725
4726 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4727                                        __le32 *key_x, __le32 *key_y,
4728                                        struct hclge_fd_rule *rule)
4729 {
4730         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4731         u8 cur_pos = 0, tuple_size, shift_bits;
4732         unsigned int i;
4733
4734         for (i = 0; i < MAX_META_DATA; i++) {
4735                 tuple_size = meta_data_key_info[i].key_length;
4736                 tuple_bit = key_cfg->meta_data_active & BIT(i);
4737
4738                 switch (tuple_bit) {
4739                 case BIT(ROCE_TYPE):
4740                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4741                         cur_pos += tuple_size;
4742                         break;
4743                 case BIT(DST_VPORT):
4744                         port_number = hclge_get_port_number(HOST_PORT, 0,
4745                                                             rule->vf_id, 0);
4746                         hnae3_set_field(meta_data,
4747                                         GENMASK(cur_pos + tuple_size, cur_pos),
4748                                         cur_pos, port_number);
4749                         cur_pos += tuple_size;
4750                         break;
4751                 default:
4752                         break;
4753                 }
4754         }
4755
4756         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4757         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4758         shift_bits = sizeof(meta_data) * 8 - cur_pos;
4759
4760         *key_x = cpu_to_le32(tmp_x << shift_bits);
4761         *key_y = cpu_to_le32(tmp_y << shift_bits);
4762 }
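/* Note (derived from the code above): meta_data is assembled from bit 0
 * upwards, then shifted left by (32 - cur_pos) so that the used bits land in
 * the most significant bits of the 32-bit meta data words of the key.
 */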
4763
4764 /* A complete key is a combination of a meta data key and a tuple key.
4765  * The meta data key is stored in the MSB region, the tuple key is stored in
4766  * the LSB region, and unused bits are filled with 0.
4767  */
4768 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4769                             struct hclge_fd_rule *rule)
4770 {
4771         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4772         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4773         u8 *cur_key_x, *cur_key_y;
4774         unsigned int i;
4775         int ret, tuple_size;
4776         u8 meta_data_region;
4777
4778         memset(key_x, 0, sizeof(key_x));
4779         memset(key_y, 0, sizeof(key_y));
4780         cur_key_x = key_x;
4781         cur_key_y = key_y;
4782
4783         for (i = 0; i < MAX_TUPLE; i++) {
4784                 bool tuple_valid;
4785                 u32 check_tuple;
4786
4787                 tuple_size = tuple_key_info[i].key_length / 8;
4788                 check_tuple = key_cfg->tuple_active & BIT(i);
4789
4790                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4791                                                      cur_key_y, rule);
4792                 if (tuple_valid) {
4793                         cur_key_x += tuple_size;
4794                         cur_key_y += tuple_size;
4795                 }
4796         }
4797
4798         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4799                         MAX_META_DATA_LENGTH / 8;
4800
4801         hclge_fd_convert_meta_data(key_cfg,
4802                                    (__le32 *)(key_x + meta_data_region),
4803                                    (__le32 *)(key_y + meta_data_region),
4804                                    rule);
4805
4806         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4807                                    true);
4808         if (ret) {
4809                 dev_err(&hdev->pdev->dev,
4810                         "fd key_y config fail, loc=%d, ret=%d\n",
4811                         rule->location, ret);
4812                 return ret;
4813         }
4814
4815         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4816                                    true);
4817         if (ret)
4818                 dev_err(&hdev->pdev->dev,
4819                         "fd key_x config fail, loc=%d, ret=%d\n",
4820                         rule->location, ret);
4821         return ret;
4822 }
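/* Key layout illustration (assuming, for illustration, MAX_KEY_LENGTH = 400
 * bits and MAX_META_DATA_LENGTH = 32 bits; see hclge_main.h for the actual
 * values):
 *
 *   key_x / key_y, 50 bytes each:
 *   +------------------------------------------+---------------------+
 *   | tuple key, 0-padded  (bytes 0 .. 45)     | meta data (46 .. 49)|
 *   +------------------------------------------+---------------------+
 *
 * meta_data_region = 400 / 8 - 32 / 8 = 46, which is the byte offset at
 * which hclge_fd_convert_meta_data() writes its 32-bit words.
 */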
4823
4824 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4825                                struct hclge_fd_rule *rule)
4826 {
4827         struct hclge_fd_ad_data ad_data;
4828
4829         ad_data.ad_id = rule->location;
4830
4831         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4832                 ad_data.drop_packet = true;
4833                 ad_data.forward_to_direct_queue = false;
4834                 ad_data.queue_id = 0;
4835         } else {
4836                 ad_data.drop_packet = false;
4837                 ad_data.forward_to_direct_queue = true;
4838                 ad_data.queue_id = rule->queue_id;
4839         }
4840
4841         ad_data.use_counter = false;
4842         ad_data.counter_id = 0;
4843
4844         ad_data.use_next_stage = false;
4845         ad_data.next_input_key = 0;
4846
4847         ad_data.write_rule_id_to_bd = true;
4848         ad_data.rule_id = rule->location;
4849
4850         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
4851 }
4852
4853 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4854                                struct ethtool_rx_flow_spec *fs, u32 *unused)
4855 {
4856         struct ethtool_tcpip4_spec *tcp_ip4_spec;
4857         struct ethtool_usrip4_spec *usr_ip4_spec;
4858         struct ethtool_tcpip6_spec *tcp_ip6_spec;
4859         struct ethtool_usrip6_spec *usr_ip6_spec;
4860         struct ethhdr *ether_spec;
4861
4862         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4863                 return -EINVAL;
4864
4865         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4866                 return -EOPNOTSUPP;
4867
4868         if ((fs->flow_type & FLOW_EXT) &&
4869             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4870                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4871                 return -EOPNOTSUPP;
4872         }
4873
4874         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4875         case SCTP_V4_FLOW:
4876         case TCP_V4_FLOW:
4877         case UDP_V4_FLOW:
4878                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4879                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4880
4881                 if (!tcp_ip4_spec->ip4src)
4882                         *unused |= BIT(INNER_SRC_IP);
4883
4884                 if (!tcp_ip4_spec->ip4dst)
4885                         *unused |= BIT(INNER_DST_IP);
4886
4887                 if (!tcp_ip4_spec->psrc)
4888                         *unused |= BIT(INNER_SRC_PORT);
4889
4890                 if (!tcp_ip4_spec->pdst)
4891                         *unused |= BIT(INNER_DST_PORT);
4892
4893                 if (!tcp_ip4_spec->tos)
4894                         *unused |= BIT(INNER_IP_TOS);
4895
4896                 break;
4897         case IP_USER_FLOW:
4898                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4899                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4900                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4901
4902                 if (!usr_ip4_spec->ip4src)
4903                         *unused |= BIT(INNER_SRC_IP);
4904
4905                 if (!usr_ip4_spec->ip4dst)
4906                         *unused |= BIT(INNER_DST_IP);
4907
4908                 if (!usr_ip4_spec->tos)
4909                         *unused |= BIT(INNER_IP_TOS);
4910
4911                 if (!usr_ip4_spec->proto)
4912                         *unused |= BIT(INNER_IP_PROTO);
4913
4914                 if (usr_ip4_spec->l4_4_bytes)
4915                         return -EOPNOTSUPP;
4916
4917                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4918                         return -EOPNOTSUPP;
4919
4920                 break;
4921         case SCTP_V6_FLOW:
4922         case TCP_V6_FLOW:
4923         case UDP_V6_FLOW:
4924                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4925                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4926                         BIT(INNER_IP_TOS);
4927
4928                 /* check whether the src/dst ip addresses are used */
4929                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4930                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4931                         *unused |= BIT(INNER_SRC_IP);
4932
4933                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4934                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4935                         *unused |= BIT(INNER_DST_IP);
4936
4937                 if (!tcp_ip6_spec->psrc)
4938                         *unused |= BIT(INNER_SRC_PORT);
4939
4940                 if (!tcp_ip6_spec->pdst)
4941                         *unused |= BIT(INNER_DST_PORT);
4942
4943                 if (tcp_ip6_spec->tclass)
4944                         return -EOPNOTSUPP;
4945
4946                 break;
4947         case IPV6_USER_FLOW:
4948                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4949                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4950                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4951                         BIT(INNER_DST_PORT);
4952
4953                 /* check whether the src/dst ip addresses are used */
4954                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4955                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4956                         *unused |= BIT(INNER_SRC_IP);
4957
4958                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4959                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4960                         *unused |= BIT(INNER_DST_IP);
4961
4962                 if (!usr_ip6_spec->l4_proto)
4963                         *unused |= BIT(INNER_IP_PROTO);
4964
4965                 if (usr_ip6_spec->tclass)
4966                         return -EOPNOTSUPP;
4967
4968                 if (usr_ip6_spec->l4_4_bytes)
4969                         return -EOPNOTSUPP;
4970
4971                 break;
4972         case ETHER_FLOW:
4973                 ether_spec = &fs->h_u.ether_spec;
4974                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4975                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4976                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4977
4978                 if (is_zero_ether_addr(ether_spec->h_source))
4979                         *unused |= BIT(INNER_SRC_MAC);
4980
4981                 if (is_zero_ether_addr(ether_spec->h_dest))
4982                         *unused |= BIT(INNER_DST_MAC);
4983
4984                 if (!ether_spec->h_proto)
4985                         *unused |= BIT(INNER_ETH_TYPE);
4986
4987                 break;
4988         default:
4989                 return -EOPNOTSUPP;
4990         }
4991
4992         if ((fs->flow_type & FLOW_EXT)) {
4993                 if (fs->h_ext.vlan_etype)
4994                         return -EOPNOTSUPP;
4995                 if (!fs->h_ext.vlan_tci)
4996                         *unused |= BIT(INNER_VLAN_TAG_FST);
4997
4998                 if (fs->m_ext.vlan_tci) {
4999                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5000                                 return -EINVAL;
5001                 }
5002         } else {
5003                 *unused |= BIT(INNER_VLAN_TAG_FST);
5004         }
5005
5006         if (fs->flow_type & FLOW_MAC_EXT) {
5007                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5008                         return -EOPNOTSUPP;
5009
5010                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5011                         *unused |= BIT(INNER_DST_MAC);
5012                 else
5013                         *unused &= ~(BIT(INNER_DST_MAC));
5014         }
5015
5016         return 0;
5017 }
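/* Illustrative example (not part of the driver): for a TCP_V4_FLOW spec with
 * a valid location that only fills in ip4dst and pdst (and has neither
 * FLOW_EXT nor FLOW_MAC_EXT), the function above returns 0 with *unused set
 * to BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) | BIT(INNER_SRC_IP) |
 * BIT(INNER_SRC_PORT) | BIT(INNER_IP_TOS) | BIT(INNER_VLAN_TAG_FST), so only
 * the destination IP, destination port and the implicit protocol/ether-type
 * tuples contribute to the TCAM key.
 */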
5018
5019 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5020 {
5021         struct hclge_fd_rule *rule = NULL;
5022         struct hlist_node *node2;
5023
5024         spin_lock_bh(&hdev->fd_rule_lock);
5025         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5026                 if (rule->location >= location)
5027                         break;
5028         }
5029
5030         spin_unlock_bh(&hdev->fd_rule_lock);
5031
5032         return rule && rule->location == location;
5033 }
5034
5035 /* the caller must hold fd_rule_lock */
5036 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5037                                      struct hclge_fd_rule *new_rule,
5038                                      u16 location,
5039                                      bool is_add)
5040 {
5041         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5042         struct hlist_node *node2;
5043
5044         if (is_add && !new_rule)
5045                 return -EINVAL;
5046
5047         hlist_for_each_entry_safe(rule, node2,
5048                                   &hdev->fd_rule_list, rule_node) {
5049                 if (rule->location >= location)
5050                         break;
5051                 parent = rule;
5052         }
5053
5054         if (rule && rule->location == location) {
5055                 hlist_del(&rule->rule_node);
5056                 kfree(rule);
5057                 hdev->hclge_fd_rule_num--;
5058
5059                 if (!is_add) {
5060                         if (!hdev->hclge_fd_rule_num)
5061                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5062                         clear_bit(location, hdev->fd_bmap);
5063
5064                         return 0;
5065                 }
5066         } else if (!is_add) {
5067                 dev_err(&hdev->pdev->dev,
5068                         "delete fail, rule %d is inexistent\n",
5069                         location);
5070                 return -EINVAL;
5071         }
5072
5073         INIT_HLIST_NODE(&new_rule->rule_node);
5074
5075         if (parent)
5076                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5077         else
5078                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5079
5080         set_bit(location, hdev->fd_bmap);
5081         hdev->hclge_fd_rule_num++;
5082         hdev->fd_active_type = new_rule->rule_type;
5083
5084         return 0;
5085 }
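/* Illustrative example (not part of the driver): if the list currently holds
 * rules at locations 1, 3 and 8, adding a rule at location 5 links it behind
 * the rule at location 3, keeping the list sorted by location; re-adding
 * location 3 first frees the existing node and then inserts the replacement
 * at the same position.
 */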
5086
5087 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5088                               struct ethtool_rx_flow_spec *fs,
5089                               struct hclge_fd_rule *rule)
5090 {
5091         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5092
5093         switch (flow_type) {
5094         case SCTP_V4_FLOW:
5095         case TCP_V4_FLOW:
5096         case UDP_V4_FLOW:
5097                 rule->tuples.src_ip[IPV4_INDEX] =
5098                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5099                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5100                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5101
5102                 rule->tuples.dst_ip[IPV4_INDEX] =
5103                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5104                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5105                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5106
5107                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5108                 rule->tuples_mask.src_port =
5109                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5110
5111                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5112                 rule->tuples_mask.dst_port =
5113                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5114
5115                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5116                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5117
5118                 rule->tuples.ether_proto = ETH_P_IP;
5119                 rule->tuples_mask.ether_proto = 0xFFFF;
5120
5121                 break;
5122         case IP_USER_FLOW:
5123                 rule->tuples.src_ip[IPV4_INDEX] =
5124                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5125                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5126                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5127
5128                 rule->tuples.dst_ip[IPV4_INDEX] =
5129                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5130                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5131                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5132
5133                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5134                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5135
5136                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5137                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5138
5139                 rule->tuples.ether_proto = ETH_P_IP;
5140                 rule->tuples_mask.ether_proto = 0xFFFF;
5141
5142                 break;
5143         case SCTP_V6_FLOW:
5144         case TCP_V6_FLOW:
5145         case UDP_V6_FLOW:
5146                 be32_to_cpu_array(rule->tuples.src_ip,
5147                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5148                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5149                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5150
5151                 be32_to_cpu_array(rule->tuples.dst_ip,
5152                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5153                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5154                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5155
5156                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5157                 rule->tuples_mask.src_port =
5158                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5159
5160                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5161                 rule->tuples_mask.dst_port =
5162                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5163
5164                 rule->tuples.ether_proto = ETH_P_IPV6;
5165                 rule->tuples_mask.ether_proto = 0xFFFF;
5166
5167                 break;
5168         case IPV6_USER_FLOW:
5169                 be32_to_cpu_array(rule->tuples.src_ip,
5170                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5171                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5172                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5173
5174                 be32_to_cpu_array(rule->tuples.dst_ip,
5175                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5176                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5177                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5178
5179                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5180                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5181
5182                 rule->tuples.ether_proto = ETH_P_IPV6;
5183                 rule->tuples_mask.ether_proto = 0xFFFF;
5184
5185                 break;
5186         case ETHER_FLOW:
5187                 ether_addr_copy(rule->tuples.src_mac,
5188                                 fs->h_u.ether_spec.h_source);
5189                 ether_addr_copy(rule->tuples_mask.src_mac,
5190                                 fs->m_u.ether_spec.h_source);
5191
5192                 ether_addr_copy(rule->tuples.dst_mac,
5193                                 fs->h_u.ether_spec.h_dest);
5194                 ether_addr_copy(rule->tuples_mask.dst_mac,
5195                                 fs->m_u.ether_spec.h_dest);
5196
5197                 rule->tuples.ether_proto =
5198                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5199                 rule->tuples_mask.ether_proto =
5200                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5201
5202                 break;
5203         default:
5204                 return -EOPNOTSUPP;
5205         }
5206
5207         switch (flow_type) {
5208         case SCTP_V4_FLOW:
5209         case SCTP_V6_FLOW:
5210                 rule->tuples.ip_proto = IPPROTO_SCTP;
5211                 rule->tuples_mask.ip_proto = 0xFF;
5212                 break;
5213         case TCP_V4_FLOW:
5214         case TCP_V6_FLOW:
5215                 rule->tuples.ip_proto = IPPROTO_TCP;
5216                 rule->tuples_mask.ip_proto = 0xFF;
5217                 break;
5218         case UDP_V4_FLOW:
5219         case UDP_V6_FLOW:
5220                 rule->tuples.ip_proto = IPPROTO_UDP;
5221                 rule->tuples_mask.ip_proto = 0xFF;
5222                 break;
5223         default:
5224                 break;
5225         }
5226
5227         if ((fs->flow_type & FLOW_EXT)) {
5228                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5229                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5230         }
5231
5232         if (fs->flow_type & FLOW_MAC_EXT) {
5233                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5234                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5235         }
5236
5237         return 0;
5238 }
5239
5240 /* the caller must hold fd_rule_lock */
5241 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5242                                 struct hclge_fd_rule *rule)
5243 {
5244         int ret;
5245
5246         if (!rule) {
5247                 dev_err(&hdev->pdev->dev,
5248                         "The flow director rule is NULL\n");
5249                 return -EINVAL;
5250         }
5251
5252         /* it never fails here, so there is no need to check the return value */
5253         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5254
5255         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5256         if (ret)
5257                 goto clear_rule;
5258
5259         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5260         if (ret)
5261                 goto clear_rule;
5262
5263         return 0;
5264
5265 clear_rule:
5266         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5267         return ret;
5268 }
5269
5270 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5271                               struct ethtool_rxnfc *cmd)
5272 {
5273         struct hclge_vport *vport = hclge_get_vport(handle);
5274         struct hclge_dev *hdev = vport->back;
5275         u16 dst_vport_id = 0, q_index = 0;
5276         struct ethtool_rx_flow_spec *fs;
5277         struct hclge_fd_rule *rule;
5278         u32 unused = 0;
5279         u8 action;
5280         int ret;
5281
5282         if (!hnae3_dev_fd_supported(hdev))
5283                 return -EOPNOTSUPP;
5284
5285         if (!hdev->fd_en) {
5286                 dev_warn(&hdev->pdev->dev,
5287                          "Please enable flow director first\n");
5288                 return -EOPNOTSUPP;
5289         }
5290
5291         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5292
5293         ret = hclge_fd_check_spec(hdev, fs, &unused);
5294         if (ret) {
5295                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5296                 return ret;
5297         }
5298
5299         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5300                 action = HCLGE_FD_ACTION_DROP_PACKET;
5301         } else {
5302                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5303                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5304                 u16 tqps;
5305
5306                 if (vf > hdev->num_req_vfs) {
5307                         dev_err(&hdev->pdev->dev,
5308                                 "Error: vf id (%d) > max vf num (%d)\n",
5309                                 vf, hdev->num_req_vfs);
5310                         return -EINVAL;
5311                 }
5312
5313                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5314                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5315
5316                 if (ring >= tqps) {
5317                         dev_err(&hdev->pdev->dev,
5318                                 "Error: queue id (%d) > max tqp num (%d)\n",
5319                                 ring, tqps - 1);
5320                         return -EINVAL;
5321                 }
5322
5323                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5324                 q_index = ring;
5325         }
5326
5327         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5328         if (!rule)
5329                 return -ENOMEM;
5330
5331         ret = hclge_fd_get_tuple(hdev, fs, rule);
5332         if (ret) {
5333                 kfree(rule);
5334                 return ret;
5335         }
5336
5337         rule->flow_type = fs->flow_type;
5338
5339         rule->location = fs->location;
5340         rule->unused_tuple = unused;
5341         rule->vf_id = dst_vport_id;
5342         rule->queue_id = q_index;
5343         rule->action = action;
5344         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5345
5346         /* to avoid rule conflicts, clear all arfs rules when the user
5347          * configures a rule via ethtool
5348          */
5349         hclge_clear_arfs_rules(handle);
5350
5351         spin_lock_bh(&hdev->fd_rule_lock);
5352         ret = hclge_fd_config_rule(hdev, rule);
5353
5354         spin_unlock_bh(&hdev->fd_rule_lock);
5355
5356         return ret;
5357 }
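/* Userspace example (illustrative; eth0 and the addresses are placeholders):
 * this entry point backs the ethtool ntuple interface, e.g.
 *
 *   ethtool -U eth0 flow-type tcp4 dst-ip 192.0.2.10 dst-port 80 action 5 loc 3
 *
 * installs a rule in slot 3 that steers matching packets to queue 5, while
 * "action -1" requests a drop (RX_CLS_FLOW_DISC above).
 */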
5358
5359 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5360                               struct ethtool_rxnfc *cmd)
5361 {
5362         struct hclge_vport *vport = hclge_get_vport(handle);
5363         struct hclge_dev *hdev = vport->back;
5364         struct ethtool_rx_flow_spec *fs;
5365         int ret;
5366
5367         if (!hnae3_dev_fd_supported(hdev))
5368                 return -EOPNOTSUPP;
5369
5370         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5371
5372         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5373                 return -EINVAL;
5374
5375         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5376                 dev_err(&hdev->pdev->dev,
5377                         "Delete fail, rule %d is inexistent\n", fs->location);
5378                 return -ENOENT;
5379         }
5380
5381         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5382                                    NULL, false);
5383         if (ret)
5384                 return ret;
5385
5386         spin_lock_bh(&hdev->fd_rule_lock);
5387         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5388
5389         spin_unlock_bh(&hdev->fd_rule_lock);
5390
5391         return ret;
5392 }
5393
5394 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5395                                      bool clear_list)
5396 {
5397         struct hclge_vport *vport = hclge_get_vport(handle);
5398         struct hclge_dev *hdev = vport->back;
5399         struct hclge_fd_rule *rule;
5400         struct hlist_node *node;
5401         u16 location;
5402
5403         if (!hnae3_dev_fd_supported(hdev))
5404                 return;
5405
5406         spin_lock_bh(&hdev->fd_rule_lock);
5407         for_each_set_bit(location, hdev->fd_bmap,
5408                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5409                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5410                                      NULL, false);
5411
5412         if (clear_list) {
5413                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5414                                           rule_node) {
5415                         hlist_del(&rule->rule_node);
5416                         kfree(rule);
5417                 }
5418                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5419                 hdev->hclge_fd_rule_num = 0;
5420                 bitmap_zero(hdev->fd_bmap,
5421                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5422         }
5423
5424         spin_unlock_bh(&hdev->fd_rule_lock);
5425 }
5426
5427 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5428 {
5429         struct hclge_vport *vport = hclge_get_vport(handle);
5430         struct hclge_dev *hdev = vport->back;
5431         struct hclge_fd_rule *rule;
5432         struct hlist_node *node;
5433         int ret;
5434
5435         /* Return ok here, because reset error handling will check this
5436          * return value. If error is returned here, the reset process will
5437          * fail.
5438          */
5439         if (!hnae3_dev_fd_supported(hdev))
5440                 return 0;
5441
5442         /* if fd is disabled, the rules should not be restored during reset */
5443         if (!hdev->fd_en)
5444                 return 0;
5445
5446         spin_lock_bh(&hdev->fd_rule_lock);
5447         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5448                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5449                 if (!ret)
5450                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5451
5452                 if (ret) {
5453                         dev_warn(&hdev->pdev->dev,
5454                                  "Restore rule %d failed, remove it\n",
5455                                  rule->location);
5456                         clear_bit(rule->location, hdev->fd_bmap);
5457                         hlist_del(&rule->rule_node);
5458                         kfree(rule);
5459                         hdev->hclge_fd_rule_num--;
5460                 }
5461         }
5462
5463         if (hdev->hclge_fd_rule_num)
5464                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5465
5466         spin_unlock_bh(&hdev->fd_rule_lock);
5467
5468         return 0;
5469 }
5470
5471 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5472                                  struct ethtool_rxnfc *cmd)
5473 {
5474         struct hclge_vport *vport = hclge_get_vport(handle);
5475         struct hclge_dev *hdev = vport->back;
5476
5477         if (!hnae3_dev_fd_supported(hdev))
5478                 return -EOPNOTSUPP;
5479
5480         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5481         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5482
5483         return 0;
5484 }
5485
5486 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5487                                   struct ethtool_rxnfc *cmd)
5488 {
5489         struct hclge_vport *vport = hclge_get_vport(handle);
5490         struct hclge_fd_rule *rule = NULL;
5491         struct hclge_dev *hdev = vport->back;
5492         struct ethtool_rx_flow_spec *fs;
5493         struct hlist_node *node2;
5494
5495         if (!hnae3_dev_fd_supported(hdev))
5496                 return -EOPNOTSUPP;
5497
5498         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5499
5500         spin_lock_bh(&hdev->fd_rule_lock);
5501
5502         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5503                 if (rule->location >= fs->location)
5504                         break;
5505         }
5506
5507         if (!rule || fs->location != rule->location) {
5508                 spin_unlock_bh(&hdev->fd_rule_lock);
5509
5510                 return -ENOENT;
5511         }
5512
5513         fs->flow_type = rule->flow_type;
5514         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5515         case SCTP_V4_FLOW:
5516         case TCP_V4_FLOW:
5517         case UDP_V4_FLOW:
5518                 fs->h_u.tcp_ip4_spec.ip4src =
5519                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5520                 fs->m_u.tcp_ip4_spec.ip4src =
5521                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5522                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5523
5524                 fs->h_u.tcp_ip4_spec.ip4dst =
5525                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5526                 fs->m_u.tcp_ip4_spec.ip4dst =
5527                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5528                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5529
5530                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5531                 fs->m_u.tcp_ip4_spec.psrc =
5532                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5533                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5534
5535                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5536                 fs->m_u.tcp_ip4_spec.pdst =
5537                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5538                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5539
5540                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5541                 fs->m_u.tcp_ip4_spec.tos =
5542                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5543                                 0 : rule->tuples_mask.ip_tos;
5544
5545                 break;
5546         case IP_USER_FLOW:
5547                 fs->h_u.usr_ip4_spec.ip4src =
5548                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5549                 fs->m_u.usr_ip4_spec.ip4src =
5550                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5551                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5552
5553                 fs->h_u.usr_ip4_spec.ip4dst =
5554                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5555                 fs->m_u.usr_ip4_spec.ip4dst =
5556                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5557                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5558
5559                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5560                 fs->m_u.usr_ip4_spec.tos =
5561                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5562                                 0 : rule->tuples_mask.ip_tos;
5563
5564                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5565                 fs->m_u.usr_ip4_spec.proto =
5566                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5567                                 0 : rule->tuples_mask.ip_proto;
5568
5569                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5570
5571                 break;
5572         case SCTP_V6_FLOW:
5573         case TCP_V6_FLOW:
5574         case UDP_V6_FLOW:
5575                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5576                                   rule->tuples.src_ip, IPV6_SIZE);
5577                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5578                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5579                                sizeof(int) * IPV6_SIZE);
5580                 else
5581                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5582                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5583
5584                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5585                                   rule->tuples.dst_ip, IPV6_SIZE);
5586                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5587                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
5588                                sizeof(int) * IPV6_SIZE);
5589                 else
5590                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5591                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5592
5593                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5594                 fs->m_u.tcp_ip6_spec.psrc =
5595                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5596                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5597
5598                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5599                 fs->m_u.tcp_ip6_spec.pdst =
5600                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5601                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5602
5603                 break;
5604         case IPV6_USER_FLOW:
5605                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5606                                   rule->tuples.src_ip, IPV6_SIZE);
5607                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5608                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
5609                                sizeof(int) * IPV6_SIZE);
5610                 else
5611                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5612                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5613
5614                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5615                                   rule->tuples.dst_ip, IPV6_SIZE);
5616                 if (rule->unused_tuple & BIT(INNER_DST_IP))
5617                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
5618                                sizeof(int) * IPV6_SIZE);
5619                 else
5620                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5621                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
5622
5623                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5624                 fs->m_u.usr_ip6_spec.l4_proto =
5625                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5626                                 0 : rule->tuples_mask.ip_proto;
5627
5628                 break;
5629         case ETHER_FLOW:
5630                 ether_addr_copy(fs->h_u.ether_spec.h_source,
5631                                 rule->tuples.src_mac);
5632                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5633                         eth_zero_addr(fs->m_u.ether_spec.h_source);
5634                 else
5635                         ether_addr_copy(fs->m_u.ether_spec.h_source,
5636                                         rule->tuples_mask.src_mac);
5637
5638                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5639                                 rule->tuples.dst_mac);
5640                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5641                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
5642                 else
5643                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
5644                                         rule->tuples_mask.dst_mac);
5645
5646                 fs->h_u.ether_spec.h_proto =
5647                                 cpu_to_be16(rule->tuples.ether_proto);
5648                 fs->m_u.ether_spec.h_proto =
5649                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5650                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5651
5652                 break;
5653         default:
5654                 spin_unlock_bh(&hdev->fd_rule_lock);
5655                 return -EOPNOTSUPP;
5656         }
5657
5658         if (fs->flow_type & FLOW_EXT) {
5659                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5660                 fs->m_ext.vlan_tci =
5661                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5662                                 cpu_to_be16(VLAN_VID_MASK) :
5663                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5664         }
5665
5666         if (fs->flow_type & FLOW_MAC_EXT) {
5667                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5668                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5669                         eth_zero_addr(fs->m_ext.h_dest);
5670                 else
5671                         ether_addr_copy(fs->m_ext.h_dest,
5672                                         rule->tuples_mask.dst_mac);
5673         }
5674
5675         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5676                 fs->ring_cookie = RX_CLS_FLOW_DISC;
5677         } else {
5678                 u64 vf_id;
5679
5680                 fs->ring_cookie = rule->queue_id;
5681                 vf_id = rule->vf_id;
5682                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5683                 fs->ring_cookie |= vf_id;
5684         }
5685
5686         spin_unlock_bh(&hdev->fd_rule_lock);
5687
5688         return 0;
5689 }
5690
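/* Copy the locations of all configured flow director rules into
 * rule_locs, failing with -EMSGSIZE if there are more rules than
 * cmd->rule_cnt allows.
 */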
5691 static int hclge_get_all_rules(struct hnae3_handle *handle,
5692                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
5693 {
5694         struct hclge_vport *vport = hclge_get_vport(handle);
5695         struct hclge_dev *hdev = vport->back;
5696         struct hclge_fd_rule *rule;
5697         struct hlist_node *node2;
5698         int cnt = 0;
5699
5700         if (!hnae3_dev_fd_supported(hdev))
5701                 return -EOPNOTSUPP;
5702
5703         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5704
5705         spin_lock_bh(&hdev->fd_rule_lock);
5706         hlist_for_each_entry_safe(rule, node2,
5707                                   &hdev->fd_rule_list, rule_node) {
5708                 if (cnt == cmd->rule_cnt) {
5709                         spin_unlock_bh(&hdev->fd_rule_lock);
5710                         return -EMSGSIZE;
5711                 }
5712
5713                 rule_locs[cnt] = rule->location;
5714                 cnt++;
5715         }
5716
5717         spin_unlock_bh(&hdev->fd_rule_lock);
5718
5719         cmd->rule_cnt = cnt;
5720
5721         return 0;
5722 }
5723
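/* Extract the tuples aRFS cares about (ether type, L4 protocol,
 * destination port and IPv4/IPv6 addresses) from the dissected
 * flow keys.
 */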
5724 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
5725                                      struct hclge_fd_rule_tuples *tuples)
5726 {
5727         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
5728         tuples->ip_proto = fkeys->basic.ip_proto;
5729         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
5730
5731         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
5732                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
5733                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
5734         } else {
5735                 memcpy(tuples->src_ip,
5736                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
5737                        sizeof(tuples->src_ip));
5738                 memcpy(tuples->dst_ip,
5739                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
5740                        sizeof(tuples->dst_ip));
5741         }
5742 }
5743
5744 /* traverse all rules, check whether an existing rule has the same tuples */
5745 static struct hclge_fd_rule *
5746 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
5747                           const struct hclge_fd_rule_tuples *tuples)
5748 {
5749         struct hclge_fd_rule *rule = NULL;
5750         struct hlist_node *node;
5751
5752         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5753                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
5754                         return rule;
5755         }
5756
5757         return NULL;
5758 }
5759
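/* Build an aRFS rule from the extracted tuples: mark the tuples that
 * are not matched, select the TCP/UDP v4/v6 flow type and use a full
 * mask for the remaining fields.
 */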
5760 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
5761                                      struct hclge_fd_rule *rule)
5762 {
5763         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5764                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
5765                              BIT(INNER_SRC_PORT);
5766         rule->action = 0;
5767         rule->vf_id = 0;
5768         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
5769         if (tuples->ether_proto == ETH_P_IP) {
5770                 if (tuples->ip_proto == IPPROTO_TCP)
5771                         rule->flow_type = TCP_V4_FLOW;
5772                 else
5773                         rule->flow_type = UDP_V4_FLOW;
5774         } else {
5775                 if (tuples->ip_proto == IPPROTO_TCP)
5776                         rule->flow_type = TCP_V6_FLOW;
5777                 else
5778                         rule->flow_type = UDP_V6_FLOW;
5779         }
5780         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
5781         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
5782 }
5783
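/* aRFS flow steering entry: refuse to work while user-configured rules
 * are active, reuse or retarget an existing rule with the same tuples,
 * or allocate a free TCAM location and program a new rule. Returns the
 * rule location on success.
 */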
5784 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
5785                                       u16 flow_id, struct flow_keys *fkeys)
5786 {
5787         struct hclge_vport *vport = hclge_get_vport(handle);
5788         struct hclge_fd_rule_tuples new_tuples;
5789         struct hclge_dev *hdev = vport->back;
5790         struct hclge_fd_rule *rule;
5791         u16 tmp_queue_id;
5792         u16 bit_id;
5793         int ret;
5794
5795         if (!hnae3_dev_fd_supported(hdev))
5796                 return -EOPNOTSUPP;
5797
5798         memset(&new_tuples, 0, sizeof(new_tuples));
5799         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
5800
5801         spin_lock_bh(&hdev->fd_rule_lock);
5802
5803         /* when a fd rule added by the user already exists,
5804          * arfs should not work
5805          */
5806         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
5807                 spin_unlock_bh(&hdev->fd_rule_lock);
5808
5809                 return -EOPNOTSUPP;
5810         }
5811
5812         /* check whether a flow director filter already exists for this
5813          * flow: if not, create a new filter for it;
5814          * if a filter exists with a different queue id, modify the filter;
5815          * if a filter exists with the same queue id, do nothing
5816          */
5817         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
5818         if (!rule) {
5819                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
5820                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5821                         spin_unlock_bh(&hdev->fd_rule_lock);
5822
5823                         return -ENOSPC;
5824                 }
5825
5826                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
5827                 if (!rule) {
5828                         spin_unlock_bh(&hdev->fd_rule_lock);
5829
5830                         return -ENOMEM;
5831                 }
5832
5833                 set_bit(bit_id, hdev->fd_bmap);
5834                 rule->location = bit_id;
5835                 rule->flow_id = flow_id;
5836                 rule->queue_id = queue_id;
5837                 hclge_fd_build_arfs_rule(&new_tuples, rule);
5838                 ret = hclge_fd_config_rule(hdev, rule);
5839
5840                 spin_unlock_bh(&hdev->fd_rule_lock);
5841
5842                 if (ret)
5843                         return ret;
5844
5845                 return rule->location;
5846         }
5847
5848         spin_unlock_bh(&hdev->fd_rule_lock);
5849
5850         if (rule->queue_id == queue_id)
5851                 return rule->location;
5852
5853         tmp_queue_id = rule->queue_id;
5854         rule->queue_id = queue_id;
5855         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5856         if (ret) {
5857                 rule->queue_id = tmp_queue_id;
5858                 return ret;
5859         }
5860
5861         return rule->location;
5862 }
5863
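/* Expire idle aRFS rules: under the lock, move rules that
 * rps_may_expire_flow() reports as idle onto a local list, then delete
 * them from the TCAM and free them outside the lock.
 */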
5864 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
5865 {
5866 #ifdef CONFIG_RFS_ACCEL
5867         struct hnae3_handle *handle = &hdev->vport[0].nic;
5868         struct hclge_fd_rule *rule;
5869         struct hlist_node *node;
5870         HLIST_HEAD(del_list);
5871
5872         spin_lock_bh(&hdev->fd_rule_lock);
5873         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
5874                 spin_unlock_bh(&hdev->fd_rule_lock);
5875                 return;
5876         }
5877         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5878                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
5879                                         rule->flow_id, rule->location)) {
5880                         hlist_del_init(&rule->rule_node);
5881                         hlist_add_head(&rule->rule_node, &del_list);
5882                         hdev->hclge_fd_rule_num--;
5883                         clear_bit(rule->location, hdev->fd_bmap);
5884                 }
5885         }
5886         spin_unlock_bh(&hdev->fd_rule_lock);
5887
5888         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
5889                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5890                                      rule->location, NULL, false);
5891                 kfree(rule);
5892         }
5893 #endif
5894 }
5895
5896 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
5897 {
5898 #ifdef CONFIG_RFS_ACCEL
5899         struct hclge_vport *vport = hclge_get_vport(handle);
5900         struct hclge_dev *hdev = vport->back;
5901
5902         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
5903                 hclge_del_all_fd_entries(handle, true);
5904 #endif
5905 }
5906
5907 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5908 {
5909         struct hclge_vport *vport = hclge_get_vport(handle);
5910         struct hclge_dev *hdev = vport->back;
5911
5912         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5913                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5914 }
5915
5916 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5917 {
5918         struct hclge_vport *vport = hclge_get_vport(handle);
5919         struct hclge_dev *hdev = vport->back;
5920
5921         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5922 }
5923
5924 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5925 {
5926         struct hclge_vport *vport = hclge_get_vport(handle);
5927         struct hclge_dev *hdev = vport->back;
5928
5929         return hdev->rst_stats.hw_reset_done_cnt;
5930 }
5931
5932 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5933 {
5934         struct hclge_vport *vport = hclge_get_vport(handle);
5935         struct hclge_dev *hdev = vport->back;
5936         bool clear;
5937
5938         hdev->fd_en = enable;
5939         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
5940         if (!enable)
5941                 hclge_del_all_fd_entries(handle, clear);
5942         else
5943                 hclge_restore_fd_entries(handle);
5944 }
5945
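/* Enable or disable the MAC: when enabling, turn on the TX/RX, padding,
 * FCS and oversize/undersize frame handling bits in a single command.
 */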
5946 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5947 {
5948         struct hclge_desc desc;
5949         struct hclge_config_mac_mode_cmd *req =
5950                 (struct hclge_config_mac_mode_cmd *)desc.data;
5951         u32 loop_en = 0;
5952         int ret;
5953
5954         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5955
5956         if (enable) {
5957                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
5958                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
5959                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
5960                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
5961                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
5962                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
5963                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
5964                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
5965                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
5966                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
5967         }
5968
5969         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5970
5971         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5972         if (ret)
5973                 dev_err(&hdev->pdev->dev,
5974                         "mac enable fail, ret =%d.\n", ret);
5975 }
5976
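/* Configure MAC (app) loopback: read the current MAC mode config,
 * toggle the loopback and TX/RX enable bits, then write it back with
 * the other fields unchanged.
 */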
5977 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5978 {
5979         struct hclge_config_mac_mode_cmd *req;
5980         struct hclge_desc desc;
5981         u32 loop_en;
5982         int ret;
5983
5984         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5985         /* 1 Read out the MAC mode config first */
5986         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5987         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5988         if (ret) {
5989                 dev_err(&hdev->pdev->dev,
5990                         "mac loopback get fail, ret =%d.\n", ret);
5991                 return ret;
5992         }
5993
5994         /* 2 Then setup the loopback flag */
5995         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5996         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5997         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5998         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5999
6000         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6001
6002         /* 3 Config mac work mode with loopback flag
6003          * and its original configuration parameters
6004          */
6005         hclge_cmd_reuse_desc(&desc, false);
6006         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6007         if (ret)
6008                 dev_err(&hdev->pdev->dev,
6009                         "mac loopback set fail, ret =%d.\n", ret);
6010         return ret;
6011 }
6012
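/* Configure serdes loopback: program the requested loopback mode, poll
 * until the firmware reports the command done, then reconfigure the MAC
 * and wait for the expected link state.
 */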
6013 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6014                                      enum hnae3_loop loop_mode)
6015 {
6016 #define HCLGE_SERDES_RETRY_MS   10
6017 #define HCLGE_SERDES_RETRY_NUM  100
6018
6019 #define HCLGE_MAC_LINK_STATUS_MS   10
6020 #define HCLGE_MAC_LINK_STATUS_NUM  100
6021 #define HCLGE_MAC_LINK_STATUS_DOWN 0
6022 #define HCLGE_MAC_LINK_STATUS_UP   1
6023
6024         struct hclge_serdes_lb_cmd *req;
6025         struct hclge_desc desc;
6026         int mac_link_ret = 0;
6027         int ret, i = 0;
6028         u8 loop_mode_b;
6029
6030         req = (struct hclge_serdes_lb_cmd *)desc.data;
6031         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6032
6033         switch (loop_mode) {
6034         case HNAE3_LOOP_SERIAL_SERDES:
6035                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6036                 break;
6037         case HNAE3_LOOP_PARALLEL_SERDES:
6038                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6039                 break;
6040         default:
6041                 dev_err(&hdev->pdev->dev,
6042                         "unsupported serdes loopback mode %d\n", loop_mode);
6043                 return -ENOTSUPP;
6044         }
6045
6046         if (en) {
6047                 req->enable = loop_mode_b;
6048                 req->mask = loop_mode_b;
6049                 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
6050         } else {
6051                 req->mask = loop_mode_b;
6052                 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
6053         }
6054
6055         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6056         if (ret) {
6057                 dev_err(&hdev->pdev->dev,
6058                         "serdes loopback set fail, ret = %d\n", ret);
6059                 return ret;
6060         }
6061
6062         do {
6063                 msleep(HCLGE_SERDES_RETRY_MS);
6064                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6065                                            true);
6066                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6067                 if (ret) {
6068                         dev_err(&hdev->pdev->dev,
6069                                 "serdes loopback get, ret = %d\n", ret);
6070                         return ret;
6071                 }
6072         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6073                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6074
6075         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6076                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6077                 return -EBUSY;
6078         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6079                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6080                 return -EIO;
6081         }
6082
6083         hclge_cfg_mac_mode(hdev, en);
6084
6085         i = 0;
6086         do {
6087                 /* serdes internal loopback, independent of the network cable. */
6088                 msleep(HCLGE_MAC_LINK_STATUS_MS);
6089                 ret = hclge_get_mac_link_status(hdev);
6090                 if (ret == mac_link_ret)
6091                         return 0;
6092         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6093
6094         dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
6095
6096         return -EBUSY;
6097 }
6098
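/* Enable or disable a single TQP (task queue pair) for the given stream id. */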
6099 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6100                             int stream_id, bool enable)
6101 {
6102         struct hclge_desc desc;
6103         struct hclge_cfg_com_tqp_queue_cmd *req =
6104                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6105         int ret;
6106
6107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6108         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6109         req->stream_id = cpu_to_le16(stream_id);
6110         if (enable)
6111                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6112
6113         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6114         if (ret)
6115                 dev_err(&hdev->pdev->dev,
6116                         "Tqp enable fail, status =%d.\n", ret);
6117         return ret;
6118 }
6119
6120 static int hclge_set_loopback(struct hnae3_handle *handle,
6121                               enum hnae3_loop loop_mode, bool en)
6122 {
6123         struct hclge_vport *vport = hclge_get_vport(handle);
6124         struct hnae3_knic_private_info *kinfo;
6125         struct hclge_dev *hdev = vport->back;
6126         int i, ret;
6127
6128         switch (loop_mode) {
6129         case HNAE3_LOOP_APP:
6130                 ret = hclge_set_app_loopback(hdev, en);
6131                 break;
6132         case HNAE3_LOOP_SERIAL_SERDES:
6133         case HNAE3_LOOP_PARALLEL_SERDES:
6134                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6135                 break;
6136         default:
6137                 ret = -ENOTSUPP;
6138                 dev_err(&hdev->pdev->dev,
6139                         "loop_mode %d is not supported\n", loop_mode);
6140                 break;
6141         }
6142
6143         if (ret)
6144                 return ret;
6145
6146         kinfo = &vport->nic.kinfo;
6147         for (i = 0; i < kinfo->num_tqps; i++) {
6148                 ret = hclge_tqp_enable(hdev, i, 0, en);
6149                 if (ret)
6150                         return ret;
6151         }
6152
6153         return 0;
6154 }
6155
6156 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6157 {
6158         struct hclge_vport *vport = hclge_get_vport(handle);
6159         struct hnae3_knic_private_info *kinfo;
6160         struct hnae3_queue *queue;
6161         struct hclge_tqp *tqp;
6162         int i;
6163
6164         kinfo = &vport->nic.kinfo;
6165         for (i = 0; i < kinfo->num_tqps; i++) {
6166                 queue = handle->kinfo.tqp[i];
6167                 tqp = container_of(queue, struct hclge_tqp, q);
6168                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6169         }
6170 }
6171
6172 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6173 {
6174         struct hclge_vport *vport = hclge_get_vport(handle);
6175         struct hclge_dev *hdev = vport->back;
6176
6177         if (enable) {
6178                 hclge_task_schedule(hdev);
6179         } else {
6180                 /* Set the DOWN flag here to disable the service to be
6181                  * scheduled again
6182                  */
6183                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6184                 cancel_delayed_work_sync(&hdev->service_task);
6185                 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
6186         }
6187 }
6188
6189 static int hclge_ae_start(struct hnae3_handle *handle)
6190 {
6191         struct hclge_vport *vport = hclge_get_vport(handle);
6192         struct hclge_dev *hdev = vport->back;
6193
6194         /* mac enable */
6195         hclge_cfg_mac_mode(hdev, true);
6196         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6197         hdev->hw.mac.link = 0;
6198
6199         /* reset tqp stats */
6200         hclge_reset_tqp_stats(handle);
6201
6202         hclge_mac_start_phy(hdev);
6203
6204         return 0;
6205 }
6206
6207 static void hclge_ae_stop(struct hnae3_handle *handle)
6208 {
6209         struct hclge_vport *vport = hclge_get_vport(handle);
6210         struct hclge_dev *hdev = vport->back;
6211         int i;
6212
6213         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6214
6215         hclge_clear_arfs_rules(handle);
6216
6217         /* If it is not a PF reset, the firmware will disable the MAC,
6218          * so we only need to stop the phy here.
6219          */
6220         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6221             hdev->reset_type != HNAE3_FUNC_RESET) {
6222                 hclge_mac_stop_phy(hdev);
6223                 return;
6224         }
6225
6226         for (i = 0; i < handle->kinfo.num_tqps; i++)
6227                 hclge_reset_tqp(handle, i);
6228
6229         /* Mac disable */
6230         hclge_cfg_mac_mode(hdev, false);
6231
6232         hclge_mac_stop_phy(hdev);
6233
6234         /* reset tqp stats */
6235         hclge_reset_tqp_stats(handle);
6236         hclge_update_link_status(hdev);
6237 }
6238
6239 int hclge_vport_start(struct hclge_vport *vport)
6240 {
6241         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6242         vport->last_active_jiffies = jiffies;
6243         return 0;
6244 }
6245
6246 void hclge_vport_stop(struct hclge_vport *vport)
6247 {
6248         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6249 }
6250
6251 static int hclge_client_start(struct hnae3_handle *handle)
6252 {
6253         struct hclge_vport *vport = hclge_get_vport(handle);
6254
6255         return hclge_vport_start(vport);
6256 }
6257
6258 static void hclge_client_stop(struct hnae3_handle *handle)
6259 {
6260         struct hclge_vport *vport = hclge_get_vport(handle);
6261
6262         hclge_vport_stop(vport);
6263 }
6264
6265 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6266                                          u16 cmdq_resp, u8  resp_code,
6267                                          enum hclge_mac_vlan_tbl_opcode op)
6268 {
6269         struct hclge_dev *hdev = vport->back;
6270         int return_status = -EIO;
6271
6272         if (cmdq_resp) {
6273                 dev_err(&hdev->pdev->dev,
6274                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
6275                         cmdq_resp);
6276                 return -EIO;
6277         }
6278
6279         if (op == HCLGE_MAC_VLAN_ADD) {
6280                 if ((!resp_code) || (resp_code == 1)) {
6281                         return_status = 0;
6282                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6283                         return_status = -ENOSPC;
6284                         dev_err(&hdev->pdev->dev,
6285                                 "add mac addr failed for uc_overflow.\n");
6286                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6287                         return_status = -ENOSPC;
6288                         dev_err(&hdev->pdev->dev,
6289                                 "add mac addr failed for mc_overflow.\n");
6290                 } else {
6291                         dev_err(&hdev->pdev->dev,
6292                                 "add mac addr failed for undefined, code=%d.\n",
6293                                 resp_code);
6294                 }
6295         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6296                 if (!resp_code) {
6297                         return_status = 0;
6298                 } else if (resp_code == 1) {
6299                         return_status = -ENOENT;
6300                         dev_dbg(&hdev->pdev->dev,
6301                                 "remove mac addr failed for miss.\n");
6302                 } else {
6303                         dev_err(&hdev->pdev->dev,
6304                                 "remove mac addr failed for undefined, code=%d.\n",
6305                                 resp_code);
6306                 }
6307         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6308                 if (!resp_code) {
6309                         return_status = 0;
6310                 } else if (resp_code == 1) {
6311                         return_status = -ENOENT;
6312                         dev_dbg(&hdev->pdev->dev,
6313                                 "lookup mac addr failed for miss.\n");
6314                 } else {
6315                         dev_err(&hdev->pdev->dev,
6316                                 "lookup mac addr failed for undefined, code=%d.\n",
6317                                 resp_code);
6318                 }
6319         } else {
6320                 return_status = -EINVAL;
6321                 dev_err(&hdev->pdev->dev,
6322                         "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
6323                         op);
6324         }
6325
6326         return return_status;
6327 }
6328
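/* Set or clear the per-function bit of a mac_vlan table entry that is
 * spread over three descriptors: vfid 0~191 live in desc[1], the rest
 * in desc[2]. E.g. vfid 200 maps to desc[2], word (200 - 192) / 32 = 0,
 * bit 200 % 32 = 8.
 */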
6329 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6330 {
6331 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6332
6333         unsigned int word_num;
6334         unsigned int bit_num;
6335
6336         if (vfid > 255 || vfid < 0)
6337                 return -EIO;
6338
6339         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6340                 word_num = vfid / 32;
6341                 bit_num  = vfid % 32;
6342                 if (clr)
6343                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6344                 else
6345                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6346         } else {
6347                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6348                 bit_num  = vfid % 32;
6349                 if (clr)
6350                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6351                 else
6352                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6353         }
6354
6355         return 0;
6356 }
6357
6358 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6359 {
6360 #define HCLGE_DESC_NUMBER 3
6361 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6362         int i, j;
6363
6364         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6365                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6366                         if (desc[i].data[j])
6367                                 return false;
6368
6369         return true;
6370 }
6371
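/* Pack a MAC address into a mac_vlan table entry: bytes 0-3 form
 * mac_addr_hi32 (byte 0 in the lowest byte), bytes 4-5 form
 * mac_addr_lo16; e.g. 00:11:22:33:44:55 gives hi32 = 0x33221100 and
 * lo16 = 0x5544. Multicast entries also set the entry type and
 * mc enable bits.
 */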
6372 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6373                                    const u8 *addr, bool is_mc)
6374 {
6375         const unsigned char *mac_addr = addr;
6376         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6377                        (mac_addr[0]) | (mac_addr[1] << 8);
6378         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6379
6380         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6381         if (is_mc) {
6382                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6383                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6384         }
6385
6386         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6387         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6388 }
6389
6390 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6391                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
6392 {
6393         struct hclge_dev *hdev = vport->back;
6394         struct hclge_desc desc;
6395         u8 resp_code;
6396         u16 retval;
6397         int ret;
6398
6399         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6400
6401         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6402
6403         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6404         if (ret) {
6405                 dev_err(&hdev->pdev->dev,
6406                         "del mac addr failed for cmd_send, ret =%d.\n",
6407                         ret);
6408                 return ret;
6409         }
6410         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6411         retval = le16_to_cpu(desc.retval);
6412
6413         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6414                                              HCLGE_MAC_VLAN_REMOVE);
6415 }
6416
6417 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6418                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
6419                                      struct hclge_desc *desc,
6420                                      bool is_mc)
6421 {
6422         struct hclge_dev *hdev = vport->back;
6423         u8 resp_code;
6424         u16 retval;
6425         int ret;
6426
6427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6428         if (is_mc) {
6429                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6430                 memcpy(desc[0].data,
6431                        req,
6432                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6433                 hclge_cmd_setup_basic_desc(&desc[1],
6434                                            HCLGE_OPC_MAC_VLAN_ADD,
6435                                            true);
6436                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6437                 hclge_cmd_setup_basic_desc(&desc[2],
6438                                            HCLGE_OPC_MAC_VLAN_ADD,
6439                                            true);
6440                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6441         } else {
6442                 memcpy(desc[0].data,
6443                        req,
6444                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6445                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6446         }
6447         if (ret) {
6448                 dev_err(&hdev->pdev->dev,
6449                         "lookup mac addr failed for cmd_send, ret =%d.\n",
6450                         ret);
6451                 return ret;
6452         }
6453         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6454         retval = le16_to_cpu(desc[0].retval);
6455
6456         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6457                                              HCLGE_MAC_VLAN_LKUP);
6458 }
6459
6460 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6461                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
6462                                   struct hclge_desc *mc_desc)
6463 {
6464         struct hclge_dev *hdev = vport->back;
6465         int cfg_status;
6466         u8 resp_code;
6467         u16 retval;
6468         int ret;
6469
6470         if (!mc_desc) {
6471                 struct hclge_desc desc;
6472
6473                 hclge_cmd_setup_basic_desc(&desc,
6474                                            HCLGE_OPC_MAC_VLAN_ADD,
6475                                            false);
6476                 memcpy(desc.data, req,
6477                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6478                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6479                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6480                 retval = le16_to_cpu(desc.retval);
6481
6482                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6483                                                            resp_code,
6484                                                            HCLGE_MAC_VLAN_ADD);
6485         } else {
6486                 hclge_cmd_reuse_desc(&mc_desc[0], false);
6487                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6488                 hclge_cmd_reuse_desc(&mc_desc[1], false);
6489                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6490                 hclge_cmd_reuse_desc(&mc_desc[2], false);
6491                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6492                 memcpy(mc_desc[0].data, req,
6493                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6494                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6495                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6496                 retval = le16_to_cpu(mc_desc[0].retval);
6497
6498                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6499                                                            resp_code,
6500                                                            HCLGE_MAC_VLAN_ADD);
6501         }
6502
6503         if (ret) {
6504                 dev_err(&hdev->pdev->dev,
6505                         "add mac addr failed for cmd_send, ret =%d.\n",
6506                         ret);
6507                 return ret;
6508         }
6509
6510         return cfg_status;
6511 }
6512
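/* Allocate unicast mac vlan (UMV) table space from firmware and split
 * it into per-function private quotas plus one shared pool, hence the
 * num_req_vfs + 2 divisor. E.g. with 254 entries and 6 VFs, each
 * function gets 254 / 8 = 31 private entries and the shared pool holds
 * 31 + 254 % 8 = 37.
 */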
6513 static int hclge_init_umv_space(struct hclge_dev *hdev)
6514 {
6515         u16 allocated_size = 0;
6516         int ret;
6517
6518         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6519                                   true);
6520         if (ret)
6521                 return ret;
6522
6523         if (allocated_size < hdev->wanted_umv_size)
6524                 dev_warn(&hdev->pdev->dev,
6525                          "Alloc umv space failed, want %d, get %d\n",
6526                          hdev->wanted_umv_size, allocated_size);
6527
6528         mutex_init(&hdev->umv_mutex);
6529         hdev->max_umv_size = allocated_size;
6530         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
6531          * preserve some unicast mac vlan table entries shared by pf
6532          * and its vfs.
6533          */
6534         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6535         hdev->share_umv_size = hdev->priv_umv_size +
6536                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6537
6538         return 0;
6539 }
6540
6541 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6542 {
6543         int ret;
6544
6545         if (hdev->max_umv_size > 0) {
6546                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6547                                           false);
6548                 if (ret)
6549                         return ret;
6550                 hdev->max_umv_size = 0;
6551         }
6552         mutex_destroy(&hdev->umv_mutex);
6553
6554         return 0;
6555 }
6556
6557 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6558                                u16 *allocated_size, bool is_alloc)
6559 {
6560         struct hclge_umv_spc_alc_cmd *req;
6561         struct hclge_desc desc;
6562         int ret;
6563
6564         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6565         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6566         if (!is_alloc)
6567                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
6568
6569         req->space_size = cpu_to_le32(space_size);
6570
6571         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6572         if (ret) {
6573                 dev_err(&hdev->pdev->dev,
6574                         "%s umv space failed for cmd_send, ret =%d\n",
6575                         is_alloc ? "allocate" : "free", ret);
6576                 return ret;
6577         }
6578
6579         if (is_alloc && allocated_size)
6580                 *allocated_size = le32_to_cpu(desc.data[1]);
6581
6582         return 0;
6583 }
6584
6585 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6586 {
6587         struct hclge_vport *vport;
6588         int i;
6589
6590         for (i = 0; i < hdev->num_alloc_vport; i++) {
6591                 vport = &hdev->vport[i];
6592                 vport->used_umv_num = 0;
6593         }
6594
6595         mutex_lock(&hdev->umv_mutex);
6596         hdev->share_umv_size = hdev->priv_umv_size +
6597                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
6598         mutex_unlock(&hdev->umv_mutex);
6599 }
6600
6601 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6602 {
6603         struct hclge_dev *hdev = vport->back;
6604         bool is_full;
6605
6606         mutex_lock(&hdev->umv_mutex);
6607         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6608                    hdev->share_umv_size == 0);
6609         mutex_unlock(&hdev->umv_mutex);
6610
6611         return is_full;
6612 }
6613
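/* Account a UMV entry add or free against the vport's private quota
 * first, and against the shared pool once the private quota is
 * exhausted.
 */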
6614 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6615 {
6616         struct hclge_dev *hdev = vport->back;
6617
6618         mutex_lock(&hdev->umv_mutex);
6619         if (is_free) {
6620                 if (vport->used_umv_num > hdev->priv_umv_size)
6621                         hdev->share_umv_size++;
6622
6623                 if (vport->used_umv_num > 0)
6624                         vport->used_umv_num--;
6625         } else {
6626                 if (vport->used_umv_num >= hdev->priv_umv_size &&
6627                     hdev->share_umv_size > 0)
6628                         hdev->share_umv_size--;
6629                 vport->used_umv_num++;
6630         }
6631         mutex_unlock(&hdev->umv_mutex);
6632 }
6633
6634 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6635                              const unsigned char *addr)
6636 {
6637         struct hclge_vport *vport = hclge_get_vport(handle);
6638
6639         return hclge_add_uc_addr_common(vport, addr);
6640 }
6641
6642 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6643                              const unsigned char *addr)
6644 {
6645         struct hclge_dev *hdev = vport->back;
6646         struct hclge_mac_vlan_tbl_entry_cmd req;
6647         struct hclge_desc desc;
6648         u16 egress_port = 0;
6649         int ret;
6650
6651         /* mac addr check */
6652         if (is_zero_ether_addr(addr) ||
6653             is_broadcast_ether_addr(addr) ||
6654             is_multicast_ether_addr(addr)) {
6655                 dev_err(&hdev->pdev->dev,
6656                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6657                          addr, is_zero_ether_addr(addr),
6658                          is_broadcast_ether_addr(addr),
6659                          is_multicast_ether_addr(addr));
6660                 return -EINVAL;
6661         }
6662
6663         memset(&req, 0, sizeof(req));
6664
6665         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6666                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6667
6668         req.egress_port = cpu_to_le16(egress_port);
6669
6670         hclge_prepare_mac_addr(&req, addr, false);
6671
6672         /* Lookup the mac address in the mac_vlan table, and add
6673          * it if the entry does not exist. Repeated unicast entries
6674          * are not allowed in the mac vlan table.
6675          */
6676         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6677         if (ret == -ENOENT) {
6678                 if (!hclge_is_umv_space_full(vport)) {
6679                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6680                         if (!ret)
6681                                 hclge_update_umv_space(vport, false);
6682                         return ret;
6683                 }
6684
6685                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6686                         hdev->priv_umv_size);
6687
6688                 return -ENOSPC;
6689         }
6690
6691         /* check if we just hit the duplicate */
6692         if (!ret) {
6693                 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6694                          vport->vport_id, addr);
6695                 return 0;
6696         }
6697
6698         dev_err(&hdev->pdev->dev,
6699                 "PF failed to add unicast entry(%pM) in the MAC table\n",
6700                 addr);
6701
6702         return ret;
6703 }
6704
6705 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6706                             const unsigned char *addr)
6707 {
6708         struct hclge_vport *vport = hclge_get_vport(handle);
6709
6710         return hclge_rm_uc_addr_common(vport, addr);
6711 }
6712
6713 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6714                             const unsigned char *addr)
6715 {
6716         struct hclge_dev *hdev = vport->back;
6717         struct hclge_mac_vlan_tbl_entry_cmd req;
6718         int ret;
6719
6720         /* mac addr check */
6721         if (is_zero_ether_addr(addr) ||
6722             is_broadcast_ether_addr(addr) ||
6723             is_multicast_ether_addr(addr)) {
6724                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
6725                         addr);
6726                 return -EINVAL;
6727         }
6728
6729         memset(&req, 0, sizeof(req));
6730         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6731         hclge_prepare_mac_addr(&req, addr, false);
6732         ret = hclge_remove_mac_vlan_tbl(vport, &req);
6733         if (!ret)
6734                 hclge_update_umv_space(vport, true);
6735
6736         return ret;
6737 }
6738
6739 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6740                              const unsigned char *addr)
6741 {
6742         struct hclge_vport *vport = hclge_get_vport(handle);
6743
6744         return hclge_add_mc_addr_common(vport, addr);
6745 }
6746
6747 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6748                              const unsigned char *addr)
6749 {
6750         struct hclge_dev *hdev = vport->back;
6751         struct hclge_mac_vlan_tbl_entry_cmd req;
6752         struct hclge_desc desc[3];
6753         int status;
6754
6755         /* mac addr check */
6756         if (!is_multicast_ether_addr(addr)) {
6757                 dev_err(&hdev->pdev->dev,
6758                         "Add mc mac err! invalid mac:%pM.\n",
6759                          addr);
6760                 return -EINVAL;
6761         }
6762         memset(&req, 0, sizeof(req));
6763         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6764         hclge_prepare_mac_addr(&req, addr, true);
6765         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6766         if (status) {
6767                 /* This mac addr does not exist, add a new entry for it */
6768                 memset(desc[0].data, 0, sizeof(desc[0].data));
6769                 memset(desc[1].data, 0, sizeof(desc[1].data));
6770                 memset(desc[2].data, 0, sizeof(desc[2].data));
6771         }
6772         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
6773         if (status)
6774                 return status;
6775         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6776
6777         if (status == -ENOSPC)
6778                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6779
6780         return status;
6781 }
6782
6783 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6784                             const unsigned char *addr)
6785 {
6786         struct hclge_vport *vport = hclge_get_vport(handle);
6787
6788         return hclge_rm_mc_addr_common(vport, addr);
6789 }
6790
6791 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6792                             const unsigned char *addr)
6793 {
6794         struct hclge_dev *hdev = vport->back;
6795         struct hclge_mac_vlan_tbl_entry_cmd req;
6796         enum hclge_cmd_status status;
6797         struct hclge_desc desc[3];
6798
6799         /* mac addr check */
6800         if (!is_multicast_ether_addr(addr)) {
6801                 dev_dbg(&hdev->pdev->dev,
6802                         "Remove mc mac err! invalid mac:%pM.\n",
6803                          addr);
6804                 return -EINVAL;
6805         }
6806
6807         memset(&req, 0, sizeof(req));
6808         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6809         hclge_prepare_mac_addr(&req, addr, true);
6810         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6811         if (!status) {
6812                 /* This mac addr exists, remove this handle's VFID for it */
6813                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
6814                 if (status)
6815                         return status;
6816
6817                 if (hclge_is_all_function_id_zero(desc))
6818                         /* All the vfids are zero, so we need to delete this entry */
6819                         status = hclge_remove_mac_vlan_tbl(vport, &req);
6820                 else
6821                         /* Not all the vfids are zero, update the vfid */
6822                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6823
6824         } else {
6825                 /* Maybe this mac address is in the mta table, but it cannot be
6826                  * deleted here because an mta entry represents an address
6827                  * range rather than a specific address. The delete action on
6828                  * all entries will take effect in update_mta_status called by
6829                  * hns3_nic_set_rx_mode.
6830                  */
6831                 status = 0;
6832         }
6833
6834         return status;
6835 }
6836
6837 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6838                                enum HCLGE_MAC_ADDR_TYPE mac_type)
6839 {
6840         struct hclge_vport_mac_addr_cfg *mac_cfg;
6841         struct list_head *list;
6842
6843         if (!vport->vport_id)
6844                 return;
6845
6846         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6847         if (!mac_cfg)
6848                 return;
6849
6850         mac_cfg->hd_tbl_status = true;
6851         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6852
6853         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6854                &vport->uc_mac_list : &vport->mc_mac_list;
6855
6856         list_add_tail(&mac_cfg->node, list);
6857 }
6858
6859 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6860                               bool is_write_tbl,
6861                               enum HCLGE_MAC_ADDR_TYPE mac_type)
6862 {
6863         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6864         struct list_head *list;
6865         bool uc_flag, mc_flag;
6866
6867         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6868                &vport->uc_mac_list : &vport->mc_mac_list;
6869
6870         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6871         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6872
6873         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6874                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6875                         if (uc_flag && mac_cfg->hd_tbl_status)
6876                                 hclge_rm_uc_addr_common(vport, mac_addr);
6877
6878                         if (mc_flag && mac_cfg->hd_tbl_status)
6879                                 hclge_rm_mc_addr_common(vport, mac_addr);
6880
6881                         list_del(&mac_cfg->node);
6882                         kfree(mac_cfg);
6883                         break;
6884                 }
6885         }
6886 }
6887
6888 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6889                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
6890 {
6891         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6892         struct list_head *list;
6893
6894         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6895                &vport->uc_mac_list : &vport->mc_mac_list;
6896
6897         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6898                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6899                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6900
6901                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6902                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6903
6904                 mac_cfg->hd_tbl_status = false;
6905                 if (is_del_list) {
6906                         list_del(&mac_cfg->node);
6907                         kfree(mac_cfg);
6908                 }
6909         }
6910 }
6911
6912 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6913 {
6914         struct hclge_vport_mac_addr_cfg *mac, *tmp;
6915         struct hclge_vport *vport;
6916         int i;
6917
6918         mutex_lock(&hdev->vport_cfg_mutex);
6919         for (i = 0; i < hdev->num_alloc_vport; i++) {
6920                 vport = &hdev->vport[i];
6921                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6922                         list_del(&mac->node);
6923                         kfree(mac);
6924                 }
6925
6926                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6927                         list_del(&mac->node);
6928                         kfree(mac);
6929                 }
6930         }
6931         mutex_unlock(&hdev->vport_cfg_mutex);
6932 }
6933
6934 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6935                                               u16 cmdq_resp, u8 resp_code)
6936 {
6937 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
6938 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
6939 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
6940 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
6941
6942         int return_status;
6943
6944         if (cmdq_resp) {
6945                 dev_err(&hdev->pdev->dev,
6946                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6947                         cmdq_resp);
6948                 return -EIO;
6949         }
6950
6951         switch (resp_code) {
6952         case HCLGE_ETHERTYPE_SUCCESS_ADD:
6953         case HCLGE_ETHERTYPE_ALREADY_ADD:
6954                 return_status = 0;
6955                 break;
6956         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6957                 dev_err(&hdev->pdev->dev,
6958                         "add mac ethertype failed for manager table overflow.\n");
6959                 return_status = -EIO;
6960                 break;
6961         case HCLGE_ETHERTYPE_KEY_CONFLICT:
6962                 dev_err(&hdev->pdev->dev,
6963                         "add mac ethertype failed for key conflict.\n");
6964                 return_status = -EIO;
6965                 break;
6966         default:
6967                 dev_err(&hdev->pdev->dev,
6968                         "add mac ethertype failed for undefined, code=%d.\n",
6969                         resp_code);
6970                 return_status = -EIO;
6971         }
6972
6973         return return_status;
6974 }
6975
6976 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6977                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
6978 {
6979         struct hclge_desc desc;
6980         u8 resp_code;
6981         u16 retval;
6982         int ret;
6983
6984         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6985         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6986
6987         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6988         if (ret) {
6989                 dev_err(&hdev->pdev->dev,
6990                         "add mac ethertype failed for cmd_send, ret =%d.\n",
6991                         ret);
6992                 return ret;
6993         }
6994
6995         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6996         retval = le16_to_cpu(desc.retval);
6997
6998         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6999 }
7000
7001 static int init_mgr_tbl(struct hclge_dev *hdev)
7002 {
7003         int ret;
7004         int i;
7005
7006         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7007                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7008                 if (ret) {
7009                         dev_err(&hdev->pdev->dev,
7010                                 "add mac ethertype failed, ret =%d.\n",
7011                                 ret);
7012                         return ret;
7013                 }
7014         }
7015
7016         return 0;
7017 }
7018
7019 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7020 {
7021         struct hclge_vport *vport = hclge_get_vport(handle);
7022         struct hclge_dev *hdev = vport->back;
7023
7024         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7025 }
7026
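/* Change the PF's unicast MAC address: validate the new address, remove the
 * old one (unless this is the first configuration), add the new one and
 * update the pause frame address.
 */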
7027 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7028                               bool is_first)
7029 {
7030         const unsigned char *new_addr = (const unsigned char *)p;
7031         struct hclge_vport *vport = hclge_get_vport(handle);
7032         struct hclge_dev *hdev = vport->back;
7033         int ret;
7034
7035         /* mac addr check */
7036         if (is_zero_ether_addr(new_addr) ||
7037             is_broadcast_ether_addr(new_addr) ||
7038             is_multicast_ether_addr(new_addr)) {
7039                 dev_err(&hdev->pdev->dev,
7040                         "Change uc mac err! invalid mac: %pM.\n",
7041                         new_addr);
7042                 return -EINVAL;
7043         }
7044
7045         if ((!is_first || is_kdump_kernel()) &&
7046             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7047                 dev_warn(&hdev->pdev->dev,
7048                          "remove old uc mac address fail.\n");
7049
7050         ret = hclge_add_uc_addr(handle, new_addr);
7051         if (ret) {
7052                 dev_err(&hdev->pdev->dev,
7053                         "add uc mac address fail, ret =%d.\n",
7054                         ret);
7055
7056                 if (!is_first &&
7057                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7058                         dev_err(&hdev->pdev->dev,
7059                                 "restore uc mac address fail.\n");
7060
7061                 return -EIO;
7062         }
7063
7064         ret = hclge_pause_addr_cfg(hdev, new_addr);
7065         if (ret) {
7066                 dev_err(&hdev->pdev->dev,
7067                         "configure mac pause address fail, ret =%d.\n",
7068                         ret);
7069                 return -EIO;
7070         }
7071
7072         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7073
7074         return 0;
7075 }
7076
7077 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7078                           int cmd)
7079 {
7080         struct hclge_vport *vport = hclge_get_vport(handle);
7081         struct hclge_dev *hdev = vport->back;
7082
7083         if (!hdev->hw.mac.phydev)
7084                 return -EOPNOTSUPP;
7085
7086         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7087 }
7088
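/* Enable or disable the VLAN filter engines selected by @fe_type for a VF or
 * for the port, depending on @vlan_type.
 */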
7089 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7090                                       u8 fe_type, bool filter_en, u8 vf_id)
7091 {
7092         struct hclge_vlan_filter_ctrl_cmd *req;
7093         struct hclge_desc desc;
7094         int ret;
7095
7096         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7097
7098         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7099         req->vlan_type = vlan_type;
7100         req->vlan_fe = filter_en ? fe_type : 0;
7101         req->vf_id = vf_id;
7102
7103         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7104         if (ret)
7105                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7106                         ret);
7107
7108         return ret;
7109 }
7110
7111 #define HCLGE_FILTER_TYPE_VF            0
7112 #define HCLGE_FILTER_TYPE_PORT          1
7113 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7114 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7115 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7116 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7117 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7118 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7119                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7120 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7121                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7122
7123 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7124 {
7125         struct hclge_vport *vport = hclge_get_vport(handle);
7126         struct hclge_dev *hdev = vport->back;
7127
7128         if (hdev->pdev->revision >= 0x21) {
7129                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7130                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7131                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7132                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7133         } else {
7134                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7135                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7136                                            0);
7137         }
7138         if (enable)
7139                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7140         else
7141                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7142 }
7143
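/* Add or remove (@is_kill) a VLAN id in the per-VF VLAN filter table and
 * interpret the firmware response codes.
 */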
7144 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7145                                     bool is_kill, u16 vlan, u8 qos,
7146                                     __be16 proto)
7147 {
7148 #define HCLGE_MAX_VF_BYTES  16
7149         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7150         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7151         struct hclge_desc desc[2];
7152         u8 vf_byte_val;
7153         u8 vf_byte_off;
7154         int ret;
7155
7156         /* If the VF VLAN table is full, firmware disables the VF VLAN filter,
7157          * so adding a new VLAN id to it is neither possible nor necessary.
7158          */
7159         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill)
7160                 return 0;
7161
7162         hclge_cmd_setup_basic_desc(&desc[0],
7163                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7164         hclge_cmd_setup_basic_desc(&desc[1],
7165                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7166
7167         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7168
7169         vf_byte_off = vfid / 8;
7170         vf_byte_val = 1 << (vfid % 8);
7171
7172         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7173         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7174
7175         req0->vlan_id  = cpu_to_le16(vlan);
7176         req0->vlan_cfg = is_kill;
7177
7178         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7179                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7180         else
7181                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7182
7183         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7184         if (ret) {
7185                 dev_err(&hdev->pdev->dev,
7186                         "Send vf vlan command fail, ret =%d.\n",
7187                         ret);
7188                 return ret;
7189         }
7190
7191         if (!is_kill) {
7192 #define HCLGE_VF_VLAN_NO_ENTRY  2
7193                 if (!req0->resp_code || req0->resp_code == 1)
7194                         return 0;
7195
7196                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7197                         set_bit(vfid, hdev->vf_vlan_full);
7198                         dev_warn(&hdev->pdev->dev,
7199                                  "vf vlan table is full, vf vlan filter is disabled\n");
7200                         return 0;
7201                 }
7202
7203                 dev_err(&hdev->pdev->dev,
7204                         "Add vf vlan filter fail, ret =%d.\n",
7205                         req0->resp_code);
7206         } else {
7207 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7208                 if (!req0->resp_code)
7209                         return 0;
7210
7211                 /* When the VF VLAN table is full, the VF VLAN filter is
7212                  * disabled, so new VLAN ids are never added to the table.
7213                  * Just return 0 without a warning to avoid flooding the
7214                  * log with messages on unload.
7215                  */
7216                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7217                         return 0;
7218
7219                 dev_err(&hdev->pdev->dev,
7220                         "Kill vf vlan filter fail, ret =%d.\n",
7221                         req0->resp_code);
7222         }
7223
7224         return -EIO;
7225 }
7226
7227 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7228                                       u16 vlan_id, bool is_kill)
7229 {
7230         struct hclge_vlan_filter_pf_cfg_cmd *req;
7231         struct hclge_desc desc;
7232         u8 vlan_offset_byte_val;
7233         u8 vlan_offset_byte;
7234         u8 vlan_offset_160;
7235         int ret;
7236
7237         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7238
7239         vlan_offset_160 = vlan_id / 160;
7240         vlan_offset_byte = (vlan_id % 160) / 8;
7241         vlan_offset_byte_val = 1 << (vlan_id % 8);
7242
7243         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7244         req->vlan_offset = vlan_offset_160;
7245         req->vlan_cfg = is_kill;
7246         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7247
7248         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7249         if (ret)
7250                 dev_err(&hdev->pdev->dev,
7251                         "port vlan command, send fail, ret =%d.\n", ret);
7252         return ret;
7253 }
7254
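/* Program a VLAN filter for one vport: update the VF VLAN table first, then
 * update the port VLAN table when the first vport joins or the last vport
 * leaves this VLAN id.
 */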
7255 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7256                                     u16 vport_id, u16 vlan_id, u8 qos,
7257                                     bool is_kill)
7258 {
7259         u16 vport_idx, vport_num = 0;
7260         int ret;
7261
7262         if (is_kill && !vlan_id)
7263                 return 0;
7264
7265         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7266                                        0, proto);
7267         if (ret) {
7268                 dev_err(&hdev->pdev->dev,
7269                         "Set %d vport vlan filter config fail, ret =%d.\n",
7270                         vport_id, ret);
7271                 return ret;
7272         }
7273
7274         /* vlan 0 may be added twice when 8021q module is enabled */
7275         if (!is_kill && !vlan_id &&
7276             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7277                 return 0;
7278
7279         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7280                 dev_err(&hdev->pdev->dev,
7281                         "Add port vlan failed, vport %d is already in vlan %d\n",
7282                         vport_id, vlan_id);
7283                 return -EINVAL;
7284         }
7285
7286         if (is_kill &&
7287             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7288                 dev_err(&hdev->pdev->dev,
7289                         "Delete port vlan failed, vport %d is not in vlan %d\n",
7290                         vport_id, vlan_id);
7291                 return -EINVAL;
7292         }
7293
7294         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7295                 vport_num++;
7296
7297         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7298                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7299                                                  is_kill);
7300
7301         return ret;
7302 }
7303
7304 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7305 {
7306         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7307         struct hclge_vport_vtag_tx_cfg_cmd *req;
7308         struct hclge_dev *hdev = vport->back;
7309         struct hclge_desc desc;
7310         int status;
7311
7312         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7313
7314         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
7315         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
7316         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
7317         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
7318                       vcfg->accept_tag1 ? 1 : 0);
7319         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
7320                       vcfg->accept_untag1 ? 1 : 0);
7321         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
7322                       vcfg->accept_tag2 ? 1 : 0);
7323         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
7324                       vcfg->accept_untag2 ? 1 : 0);
7325         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
7326                       vcfg->insert_tag1_en ? 1 : 0);
7327         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
7328                       vcfg->insert_tag2_en ? 1 : 0);
7329         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
7330
7331         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7332         req->vf_bitmap[req->vf_offset] =
7333                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7334
7335         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7336         if (status)
7337                 dev_err(&hdev->pdev->dev,
7338                         "Send port txvlan cfg command fail, ret =%d\n",
7339                         status);
7340
7341         return status;
7342 }
7343
7344 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
7345 {
7346         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
7347         struct hclge_vport_vtag_rx_cfg_cmd *req;
7348         struct hclge_dev *hdev = vport->back;
7349         struct hclge_desc desc;
7350         int status;
7351
7352         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
7353
7354         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
7355         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
7356                       vcfg->strip_tag1_en ? 1 : 0);
7357         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
7358                       vcfg->strip_tag2_en ? 1 : 0);
7359         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
7360                       vcfg->vlan1_vlan_prionly ? 1 : 0);
7361         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
7362                       vcfg->vlan2_vlan_prionly ? 1 : 0);
7363
7364         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
7365         req->vf_bitmap[req->vf_offset] =
7366                 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
7367
7368         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7369         if (status)
7370                 dev_err(&hdev->pdev->dev,
7371                         "Send port rxvlan cfg command fail, ret =%d\n",
7372                         status);
7373
7374         return status;
7375 }
7376
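/* Configure TX VLAN insertion and RX VLAN stripping for a vport according to
 * its port based VLAN state.
 */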
7377 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
7378                                   u16 port_base_vlan_state,
7379                                   u16 vlan_tag)
7380 {
7381         int ret;
7382
7383         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7384                 vport->txvlan_cfg.accept_tag1 = true;
7385                 vport->txvlan_cfg.insert_tag1_en = false;
7386                 vport->txvlan_cfg.default_tag1 = 0;
7387         } else {
7388                 vport->txvlan_cfg.accept_tag1 = false;
7389                 vport->txvlan_cfg.insert_tag1_en = true;
7390                 vport->txvlan_cfg.default_tag1 = vlan_tag;
7391         }
7392
7393         vport->txvlan_cfg.accept_untag1 = true;
7394
7395         /* accept_tag2 and accept_untag2 are not supported on
7396          * pdev revision 0x20; newer revisions support them, but
7397          * these two fields cannot be configured by the user.
7398          */
7399         vport->txvlan_cfg.accept_tag2 = true;
7400         vport->txvlan_cfg.accept_untag2 = true;
7401         vport->txvlan_cfg.insert_tag2_en = false;
7402         vport->txvlan_cfg.default_tag2 = 0;
7403
7404         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7405                 vport->rxvlan_cfg.strip_tag1_en = false;
7406                 vport->rxvlan_cfg.strip_tag2_en =
7407                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7408         } else {
7409                 vport->rxvlan_cfg.strip_tag1_en =
7410                                 vport->rxvlan_cfg.rx_vlan_offload_en;
7411                 vport->rxvlan_cfg.strip_tag2_en = true;
7412         }
7413         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7414         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7415
7416         ret = hclge_set_vlan_tx_offload_cfg(vport);
7417         if (ret)
7418                 return ret;
7419
7420         return hclge_set_vlan_rx_offload_cfg(vport);
7421 }
7422
7423 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7424 {
7425         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7426         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7427         struct hclge_desc desc;
7428         int status;
7429
7430         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7431         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7432         rx_req->ot_fst_vlan_type =
7433                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7434         rx_req->ot_sec_vlan_type =
7435                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7436         rx_req->in_fst_vlan_type =
7437                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7438         rx_req->in_sec_vlan_type =
7439                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7440
7441         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7442         if (status) {
7443                 dev_err(&hdev->pdev->dev,
7444                         "Send rxvlan protocol type command fail, ret =%d\n",
7445                         status);
7446                 return status;
7447         }
7448
7449         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7450
7451         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7452         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7453         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7454
7455         status = hclge_cmd_send(&hdev->hw, &desc, 1);
7456         if (status)
7457                 dev_err(&hdev->pdev->dev,
7458                         "Send txvlan protocol type command fail, ret =%d\n",
7459                         status);
7460
7461         return status;
7462 }
7463
7464 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7465 {
7466 #define HCLGE_DEF_VLAN_TYPE             0x8100
7467
7468         struct hnae3_handle *handle = &hdev->vport[0].nic;
7469         struct hclge_vport *vport;
7470         int ret;
7471         int i;
7472
7473         if (hdev->pdev->revision >= 0x21) {
7474                 /* for revision 0x21, vf vlan filter is per function */
7475                 for (i = 0; i < hdev->num_alloc_vport; i++) {
7476                         vport = &hdev->vport[i];
7477                         ret = hclge_set_vlan_filter_ctrl(hdev,
7478                                                          HCLGE_FILTER_TYPE_VF,
7479                                                          HCLGE_FILTER_FE_EGRESS,
7480                                                          true,
7481                                                          vport->vport_id);
7482                         if (ret)
7483                                 return ret;
7484                 }
7485
7486                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7487                                                  HCLGE_FILTER_FE_INGRESS, true,
7488                                                  0);
7489                 if (ret)
7490                         return ret;
7491         } else {
7492                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7493                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
7494                                                  true, 0);
7495                 if (ret)
7496                         return ret;
7497         }
7498
7499         handle->netdev_flags |= HNAE3_VLAN_FLTR;
7500
7501         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7502         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7503         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7504         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7505         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7506         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7507
7508         ret = hclge_set_vlan_protocol_type(hdev);
7509         if (ret)
7510                 return ret;
7511
7512         for (i = 0; i < hdev->num_alloc_vport; i++) {
7513                 u16 vlan_tag;
7514
7515                 vport = &hdev->vport[i];
7516                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7517
7518                 ret = hclge_vlan_offload_cfg(vport,
7519                                              vport->port_base_vlan_cfg.state,
7520                                              vlan_tag);
7521                 if (ret)
7522                         return ret;
7523         }
7524
7525         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
7526 }
7527
7528 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7529                                        bool writen_to_tbl)
7530 {
7531         struct hclge_vport_vlan_cfg *vlan;
7532
7533         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7534         if (!vlan)
7535                 return;
7536
7537         vlan->hd_tbl_status = writen_to_tbl;
7538         vlan->vlan_id = vlan_id;
7539
7540         list_add_tail(&vlan->node, &vport->vlan_list);
7541 }
7542
7543 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7544 {
7545         struct hclge_vport_vlan_cfg *vlan, *tmp;
7546         struct hclge_dev *hdev = vport->back;
7547         int ret;
7548
7549         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7550                 if (!vlan->hd_tbl_status) {
7551                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7552                                                        vport->vport_id,
7553                                                        vlan->vlan_id, 0, false);
7554                         if (ret) {
7555                                 dev_err(&hdev->pdev->dev,
7556                                         "restore vport vlan list failed, ret=%d\n",
7557                                         ret);
7558                                 return ret;
7559                         }
7560                 }
7561                 vlan->hd_tbl_status = true;
7562         }
7563
7564         return 0;
7565 }
7566
7567 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7568                                       bool is_write_tbl)
7569 {
7570         struct hclge_vport_vlan_cfg *vlan, *tmp;
7571         struct hclge_dev *hdev = vport->back;
7572
7573         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7574                 if (vlan->vlan_id == vlan_id) {
7575                         if (is_write_tbl && vlan->hd_tbl_status)
7576                                 hclge_set_vlan_filter_hw(hdev,
7577                                                          htons(ETH_P_8021Q),
7578                                                          vport->vport_id,
7579                                                          vlan_id, 0,
7580                                                          true);
7581
7582                         list_del(&vlan->node);
7583                         kfree(vlan);
7584                         break;
7585                 }
7586         }
7587 }
7588
7589 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7590 {
7591         struct hclge_vport_vlan_cfg *vlan, *tmp;
7592         struct hclge_dev *hdev = vport->back;
7593
7594         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7595                 if (vlan->hd_tbl_status)
7596                         hclge_set_vlan_filter_hw(hdev,
7597                                                  htons(ETH_P_8021Q),
7598                                                  vport->vport_id,
7599                                                  vlan->vlan_id, 0,
7600                                                  true);
7601
7602                 vlan->hd_tbl_status = false;
7603                 if (is_del_list) {
7604                         list_del(&vlan->node);
7605                         kfree(vlan);
7606                 }
7607         }
7608 }
7609
7610 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7611 {
7612         struct hclge_vport_vlan_cfg *vlan, *tmp;
7613         struct hclge_vport *vport;
7614         int i;
7615
7616         mutex_lock(&hdev->vport_cfg_mutex);
7617         for (i = 0; i < hdev->num_alloc_vport; i++) {
7618                 vport = &hdev->vport[i];
7619                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7620                         list_del(&vlan->node);
7621                         kfree(vlan);
7622                 }
7623         }
7624         mutex_unlock(&hdev->vport_cfg_mutex);
7625 }
7626
7627 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
7628 {
7629         struct hclge_vport *vport = hclge_get_vport(handle);
7630         struct hclge_vport_vlan_cfg *vlan, *tmp;
7631         struct hclge_dev *hdev = vport->back;
7632         u16 vlan_proto, qos;
7633         u16 state, vlan_id;
7634         int i;
7635
7636         mutex_lock(&hdev->vport_cfg_mutex);
7637         for (i = 0; i < hdev->num_alloc_vport; i++) {
7638                 vport = &hdev->vport[i];
7639                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
7640                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7641                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
7642                 state = vport->port_base_vlan_cfg.state;
7643
7644                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
7645                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
7646                                                  vport->vport_id, vlan_id, qos,
7647                                                  false);
7648                         continue;
7649                 }
7650
7651                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7652                         if (vlan->hd_tbl_status)
7653                                 hclge_set_vlan_filter_hw(hdev,
7654                                                          htons(ETH_P_8021Q),
7655                                                          vport->vport_id,
7656                                                          vlan->vlan_id, 0,
7657                                                          false);
7658                 }
7659         }
7660
7661         mutex_unlock(&hdev->vport_cfg_mutex);
7662 }
7663
7664 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7665 {
7666         struct hclge_vport *vport = hclge_get_vport(handle);
7667
7668         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7669                 vport->rxvlan_cfg.strip_tag1_en = false;
7670                 vport->rxvlan_cfg.strip_tag2_en = enable;
7671         } else {
7672                 vport->rxvlan_cfg.strip_tag1_en = enable;
7673                 vport->rxvlan_cfg.strip_tag2_en = true;
7674         }
7675         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7676         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7677         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7678
7679         return hclge_set_vlan_rx_offload_cfg(vport);
7680 }
7681
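/* Switch the hardware VLAN filter entries between the port based VLAN and
 * the entries recorded in the vport VLAN list.
 */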
7682 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7683                                             u16 port_base_vlan_state,
7684                                             struct hclge_vlan_info *new_info,
7685                                             struct hclge_vlan_info *old_info)
7686 {
7687         struct hclge_dev *hdev = vport->back;
7688         int ret;
7689
7690         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7691                 hclge_rm_vport_all_vlan_table(vport, false);
7692                 return hclge_set_vlan_filter_hw(hdev,
7693                                                  htons(new_info->vlan_proto),
7694                                                  vport->vport_id,
7695                                                  new_info->vlan_tag,
7696                                                  new_info->qos, false);
7697         }
7698
7699         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7700                                        vport->vport_id, old_info->vlan_tag,
7701                                        old_info->qos, true);
7702         if (ret)
7703                 return ret;
7704
7705         return hclge_add_vport_all_vlan_table(vport);
7706 }
7707
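/* Apply a new port based VLAN configuration to a vport: reprogram the VLAN
 * offload and filter settings and record the new state and tag.
 */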
7708 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7709                                     struct hclge_vlan_info *vlan_info)
7710 {
7711         struct hnae3_handle *nic = &vport->nic;
7712         struct hclge_vlan_info *old_vlan_info;
7713         struct hclge_dev *hdev = vport->back;
7714         int ret;
7715
7716         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7717
7718         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7719         if (ret)
7720                 return ret;
7721
7722         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7723                 /* add new VLAN tag */
7724                 ret = hclge_set_vlan_filter_hw(hdev,
7725                                                htons(vlan_info->vlan_proto),
7726                                                vport->vport_id,
7727                                                vlan_info->vlan_tag,
7728                                                vlan_info->qos, false);
7729                 if (ret)
7730                         return ret;
7731
7732                 /* remove old VLAN tag */
7733                 ret = hclge_set_vlan_filter_hw(hdev,
7734                                                htons(old_vlan_info->vlan_proto),
7735                                                vport->vport_id,
7736                                                old_vlan_info->vlan_tag,
7737                                                old_vlan_info->qos, true);
7738                 if (ret)
7739                         return ret;
7740
7741                 goto update;
7742         }
7743
7744         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7745                                                old_vlan_info);
7746         if (ret)
7747                 return ret;
7748
7749         /* update state only when disable/enable port based VLAN */
7750         vport->port_base_vlan_cfg.state = state;
7751         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7752                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7753         else
7754                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7755
7756 update:
7757         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7758         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7759         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7760
7761         return 0;
7762 }
7763
7764 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7765                                           enum hnae3_port_base_vlan_state state,
7766                                           u16 vlan)
7767 {
7768         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7769                 if (!vlan)
7770                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7771                 else
7772                         return HNAE3_PORT_BASE_VLAN_ENABLE;
7773         } else {
7774                 if (!vlan)
7775                         return HNAE3_PORT_BASE_VLAN_DISABLE;
7776                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7777                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7778                 else
7779                         return HNAE3_PORT_BASE_VLAN_MODIFY;
7780         }
7781 }
7782
7783 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7784                                     u16 vlan, u8 qos, __be16 proto)
7785 {
7786         struct hclge_vport *vport = hclge_get_vport(handle);
7787         struct hclge_dev *hdev = vport->back;
7788         struct hclge_vlan_info vlan_info;
7789         u16 state;
7790         int ret;
7791
7792         if (hdev->pdev->revision == 0x20)
7793                 return -EOPNOTSUPP;
7794
7795         /* qos is a 3-bit value, so it cannot be bigger than 7 */
7796         if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7797                 return -EINVAL;
7798         if (proto != htons(ETH_P_8021Q))
7799                 return -EPROTONOSUPPORT;
7800
7801         vport = &hdev->vport[vfid];
7802         state = hclge_get_port_base_vlan_state(vport,
7803                                                vport->port_base_vlan_cfg.state,
7804                                                vlan);
7805         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7806                 return 0;
7807
7808         vlan_info.vlan_tag = vlan;
7809         vlan_info.qos = qos;
7810         vlan_info.vlan_proto = ntohs(proto);
7811
7812         /* update port based VLAN for PF */
7813         if (!vfid) {
7814                 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7815                 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7816                 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7817
7818                 return ret;
7819         }
7820
7821         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7822                 return hclge_update_port_base_vlan_cfg(vport, state,
7823                                                        &vlan_info);
7824         } else {
7825                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7826                                                         (u8)vfid, state,
7827                                                         vlan, qos,
7828                                                         ntohs(proto));
7829                 return ret;
7830         }
7831 }
7832
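/* Add or remove (@is_kill) a VLAN filter for the vport: the hardware filter
 * is only programmed while port based VLAN is disabled, and the vport VLAN
 * list is kept in sync in either case.
 */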
7833 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7834                           u16 vlan_id, bool is_kill)
7835 {
7836         struct hclge_vport *vport = hclge_get_vport(handle);
7837         struct hclge_dev *hdev = vport->back;
7838         bool writen_to_tbl = false;
7839         int ret = 0;
7840
7841         /* When the device is resetting, firmware cannot handle the
7842          * mailbox. Just record the VLAN id and remove it after the
7843          * reset has finished.
7844          */
7845         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
7846                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7847                 return -EBUSY;
7848         }
7849
7850         /* When port based VLAN is enabled, it is used as the VLAN filter
7851          * entry. In this case the VLAN filter table is not updated when the
7852          * user adds or removes a VLAN; only the vport VLAN list is updated.
7853          * The VLAN ids in that list are written to the VLAN filter table
7854          * once port based VLAN is disabled.
7855          */
7856         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7857                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7858                                                vlan_id, 0, is_kill);
7859                 writen_to_tbl = true;
7860         }
7861
7862         if (!ret) {
7863                 if (is_kill)
7864                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
7865                 else
7866                         hclge_add_vport_vlan_table(vport, vlan_id,
7867                                                    writen_to_tbl);
7868         } else if (is_kill) {
7869                 /* When removing the hw VLAN filter failed, record the VLAN
7870                  * id and try to remove it from hw later, to stay consistent
7871                  * with the stack.
7872                  */
7873                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
7874         }
7875         return ret;
7876 }
7877
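/* Retry the VLAN deletions recorded in vlan_del_fail_bmap (e.g. ones that
 * failed while the device was resetting), bounded by HCLGE_MAX_SYNC_COUNT
 * per call.
 */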
7878 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
7879 {
7880 #define HCLGE_MAX_SYNC_COUNT    60
7881
7882         int i, ret, sync_cnt = 0;
7883         u16 vlan_id;
7884
7885         /* the PF (vport 0) is always alive; sync failed deletions for all vports */
7886         for (i = 0; i < hdev->num_alloc_vport; i++) {
7887                 struct hclge_vport *vport = &hdev->vport[i];
7888
7889                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7890                                          VLAN_N_VID);
7891                 while (vlan_id != VLAN_N_VID) {
7892                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7893                                                        vport->vport_id, vlan_id,
7894                                                        0, true);
7895                         if (ret && ret != -EINVAL)
7896                                 return;
7897
7898                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
7899                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
7900
7901                         sync_cnt++;
7902                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
7903                                 return;
7904
7905                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
7906                                                  VLAN_N_VID);
7907                 }
7908         }
7909 }
7910
7911 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7912 {
7913         struct hclge_config_max_frm_size_cmd *req;
7914         struct hclge_desc desc;
7915
7916         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7917
7918         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7919         req->max_frm_size = cpu_to_le16(new_mps);
7920         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7921
7922         return hclge_cmd_send(&hdev->hw, &desc, 1);
7923 }
7924
7925 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7926 {
7927         struct hclge_vport *vport = hclge_get_vport(handle);
7928
7929         return hclge_set_vport_mtu(vport, new_mtu);
7930 }
7931
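/* Set the MPS (max packet size) of a vport. A VF only records its own value,
 * which must fit within the PF's, while the PF reprograms the MAC and
 * reallocates the packet buffers.
 */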
7932 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7933 {
7934         struct hclge_dev *hdev = vport->back;
7935         int i, max_frm_size, ret;
7936
7937         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7938         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7939             max_frm_size > HCLGE_MAC_MAX_FRAME)
7940                 return -EINVAL;
7941
7942         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7943         mutex_lock(&hdev->vport_lock);
7944         /* VF's mps must fit within hdev->mps */
7945         if (vport->vport_id && max_frm_size > hdev->mps) {
7946                 mutex_unlock(&hdev->vport_lock);
7947                 return -EINVAL;
7948         } else if (vport->vport_id) {
7949                 vport->mps = max_frm_size;
7950                 mutex_unlock(&hdev->vport_lock);
7951                 return 0;
7952         }
7953
7954         /* PF's mps must not be less than any VF's mps */
7955         for (i = 1; i < hdev->num_alloc_vport; i++)
7956                 if (max_frm_size < hdev->vport[i].mps) {
7957                         mutex_unlock(&hdev->vport_lock);
7958                         return -EINVAL;
7959                 }
7960
7961         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7962
7963         ret = hclge_set_mac_mtu(hdev, max_frm_size);
7964         if (ret) {
7965                 dev_err(&hdev->pdev->dev,
7966                         "Change mtu fail, ret =%d\n", ret);
7967                 goto out;
7968         }
7969
7970         hdev->mps = max_frm_size;
7971         vport->mps = max_frm_size;
7972
7973         ret = hclge_buffer_alloc(hdev);
7974         if (ret)
7975                 dev_err(&hdev->pdev->dev,
7976                         "Allocate buffer fail, ret =%d\n", ret);
7977
7978 out:
7979         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7980         mutex_unlock(&hdev->vport_lock);
7981         return ret;
7982 }
7983
7984 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7985                                     bool enable)
7986 {
7987         struct hclge_reset_tqp_queue_cmd *req;
7988         struct hclge_desc desc;
7989         int ret;
7990
7991         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7992
7993         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7994         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7995         if (enable)
7996                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
7997
7998         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7999         if (ret) {
8000                 dev_err(&hdev->pdev->dev,
8001                         "Send tqp reset cmd error, status =%d\n", ret);
8002                 return ret;
8003         }
8004
8005         return 0;
8006 }
8007
8008 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8009 {
8010         struct hclge_reset_tqp_queue_cmd *req;
8011         struct hclge_desc desc;
8012         int ret;
8013
8014         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8015
8016         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8017         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8018
8019         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8020         if (ret) {
8021                 dev_err(&hdev->pdev->dev,
8022                         "Get reset status error, status =%d\n", ret);
8023                 return ret;
8024         }
8025
8026         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8027 }
8028
8029 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8030 {
8031         struct hnae3_queue *queue;
8032         struct hclge_tqp *tqp;
8033
8034         queue = handle->kinfo.tqp[queue_id];
8035         tqp = container_of(queue, struct hclge_tqp, q);
8036
8037         return tqp->index;
8038 }
8039
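/* Reset one TQP: disable it, request the reset, poll for completion and then
 * deassert the reset.
 */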
8040 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8041 {
8042         struct hclge_vport *vport = hclge_get_vport(handle);
8043         struct hclge_dev *hdev = vport->back;
8044         int reset_try_times = 0;
8045         int reset_status;
8046         u16 queue_gid;
8047         int ret;
8048
8049         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8050
8051         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8052         if (ret) {
8053                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8054                 return ret;
8055         }
8056
8057         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8058         if (ret) {
8059                 dev_err(&hdev->pdev->dev,
8060                         "Send reset tqp cmd fail, ret = %d\n", ret);
8061                 return ret;
8062         }
8063
8064         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8065                 /* Wait for tqp hw reset */
8066                 msleep(20);
8067                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8068                 if (reset_status)
8069                         break;
8070         }
8071
8072         if (!reset_status) {
8073                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8074                 return -ETIME;
8075         }
8076
8077         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8078         if (ret)
8079                 dev_err(&hdev->pdev->dev,
8080                         "Deassert the soft reset fail, ret = %d\n", ret);
8081
8082         return ret;
8083 }
8084
8085 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8086 {
8087         struct hclge_dev *hdev = vport->back;
8088         int reset_try_times = 0;
8089         int reset_status;
8090         u16 queue_gid;
8091         int ret;
8092
8093         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8094
8095         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8096         if (ret) {
8097                 dev_warn(&hdev->pdev->dev,
8098                          "Send reset tqp cmd fail, ret = %d\n", ret);
8099                 return;
8100         }
8101
8102         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8103                 /* Wait for tqp hw reset */
8104                 msleep(20);
8105                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8106                 if (reset_status)
8107                         break;
8108         }
8109
8110         if (!reset_status) {
8111                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8112                 return;
8113         }
8114
8115         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8116         if (ret)
8117                 dev_warn(&hdev->pdev->dev,
8118                          "Deassert the soft reset fail, ret = %d\n", ret);
8119 }
8120
8121 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8122 {
8123         struct hclge_vport *vport = hclge_get_vport(handle);
8124         struct hclge_dev *hdev = vport->back;
8125
8126         return hdev->fw_version;
8127 }
8128
8129 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8130 {
8131         struct phy_device *phydev = hdev->hw.mac.phydev;
8132
8133         if (!phydev)
8134                 return;
8135
8136         phy_set_asym_pause(phydev, rx_en, tx_en);
8137 }
8138
8139 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8140 {
8141         int ret;
8142
8143         if (rx_en && tx_en)
8144                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8145         else if (rx_en && !tx_en)
8146                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8147         else if (!rx_en && tx_en)
8148                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8149         else
8150                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8151
8152         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8153                 return 0;
8154
8155         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8156         if (ret) {
8157                 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
8158                         ret);
8159                 return ret;
8160         }
8161
8162         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8163
8164         return 0;
8165 }
8166
8167 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8168 {
8169         struct phy_device *phydev = hdev->hw.mac.phydev;
8170         u16 remote_advertising = 0;
8171         u16 local_advertising;
8172         u32 rx_pause, tx_pause;
8173         u8 flowctl;
8174
8175         if (!phydev->link || !phydev->autoneg)
8176                 return 0;
8177
8178         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8179
8180         if (phydev->pause)
8181                 remote_advertising = LPA_PAUSE_CAP;
8182
8183         if (phydev->asym_pause)
8184                 remote_advertising |= LPA_PAUSE_ASYM;
8185
8186         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8187                                            remote_advertising);
8188         tx_pause = flowctl & FLOW_CTRL_TX;
8189         rx_pause = flowctl & FLOW_CTRL_RX;
8190
8191         if (phydev->duplex == HCLGE_MAC_HALF) {
8192                 tx_pause = 0;
8193                 rx_pause = 0;
8194         }
8195
8196         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8197 }
8198
8199 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8200                                  u32 *rx_en, u32 *tx_en)
8201 {
8202         struct hclge_vport *vport = hclge_get_vport(handle);
8203         struct hclge_dev *hdev = vport->back;
8204         struct phy_device *phydev = hdev->hw.mac.phydev;
8205
8206         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8207
8208         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8209                 *rx_en = 0;
8210                 *tx_en = 0;
8211                 return;
8212         }
8213
8214         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8215                 *rx_en = 1;
8216                 *tx_en = 0;
8217         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8218                 *tx_en = 1;
8219                 *rx_en = 0;
8220         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8221                 *rx_en = 1;
8222                 *tx_en = 1;
8223         } else {
8224                 *rx_en = 0;
8225                 *tx_en = 0;
8226         }
8227 }
8228
8229 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8230                                 u32 rx_en, u32 tx_en)
8231 {
8232         struct hclge_vport *vport = hclge_get_vport(handle);
8233         struct hclge_dev *hdev = vport->back;
8234         struct phy_device *phydev = hdev->hw.mac.phydev;
8235         u32 fc_autoneg;
8236
8237         if (phydev) {
8238                 fc_autoneg = hclge_get_autoneg(handle);
8239                 if (auto_neg != fc_autoneg) {
8240                         dev_info(&hdev->pdev->dev,
8241                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8242                         return -EOPNOTSUPP;
8243                 }
8244         }
8245
8246         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8247                 dev_info(&hdev->pdev->dev,
8248                          "Priority flow control enabled. Cannot set link flow control.\n");
8249                 return -EOPNOTSUPP;
8250         }
8251
8252         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8253
8254         if (!auto_neg)
8255                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8256
8257         if (phydev)
8258                 return phy_start_aneg(phydev);
8259
8260         return -EOPNOTSUPP;
8261 }
8262
8263 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8264                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8265 {
8266         struct hclge_vport *vport = hclge_get_vport(handle);
8267         struct hclge_dev *hdev = vport->back;
8268
8269         if (speed)
8270                 *speed = hdev->hw.mac.speed;
8271         if (duplex)
8272                 *duplex = hdev->hw.mac.duplex;
8273         if (auto_neg)
8274                 *auto_neg = hdev->hw.mac.autoneg;
8275 }
8276
8277 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8278                                  u8 *module_type)
8279 {
8280         struct hclge_vport *vport = hclge_get_vport(handle);
8281         struct hclge_dev *hdev = vport->back;
8282
8283         if (media_type)
8284                 *media_type = hdev->hw.mac.media_type;
8285
8286         if (module_type)
8287                 *module_type = hdev->hw.mac.module_type;
8288 }
8289
8290 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8291                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8292 {
8293         struct hclge_vport *vport = hclge_get_vport(handle);
8294         struct hclge_dev *hdev = vport->back;
8295         struct phy_device *phydev = hdev->hw.mac.phydev;
8296         int mdix_ctrl, mdix, is_resolved;
8297         unsigned int retval;
8298
8299         if (!phydev) {
8300                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8301                 *tp_mdix = ETH_TP_MDI_INVALID;
8302                 return;
8303         }
8304
8305         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
8306
8307         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
8308         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
8309                                     HCLGE_PHY_MDIX_CTRL_S);
8310
8311         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
8312         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
8313         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
8314
8315         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
8316
8317         switch (mdix_ctrl) {
8318         case 0x0:
8319                 *tp_mdix_ctrl = ETH_TP_MDI;
8320                 break;
8321         case 0x1:
8322                 *tp_mdix_ctrl = ETH_TP_MDI_X;
8323                 break;
8324         case 0x3:
8325                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
8326                 break;
8327         default:
8328                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8329                 break;
8330         }
8331
8332         if (!is_resolved)
8333                 *tp_mdix = ETH_TP_MDI_INVALID;
8334         else if (mdix)
8335                 *tp_mdix = ETH_TP_MDI_X;
8336         else
8337                 *tp_mdix = ETH_TP_MDI;
8338 }
8339
8340 static void hclge_info_show(struct hclge_dev *hdev)
8341 {
8342         struct device *dev = &hdev->pdev->dev;
8343
8344         dev_info(dev, "PF info begin:\n");
8345
8346         dev_info(dev, "Number of task queue pairs: %d\n", hdev->num_tqps);
8347         dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
8348         dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
8349         dev_info(dev, "Number of vports: %d\n", hdev->num_alloc_vport);
8350         dev_info(dev, "Number of vmdq vports: %d\n", hdev->num_vmdq_vport);
8351         dev_info(dev, "Number of VFs for this PF: %d\n", hdev->num_req_vfs);
8352         dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
8353         dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
8354         dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
8355         dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
8356         dev_info(dev, "This is %s PF\n",
8357                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
8358         dev_info(dev, "DCB %s\n",
8359                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
8360         dev_info(dev, "MQPRIO %s\n",
8361                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
8362
8363         dev_info(dev, "PF info end.\n");
8364 }
8365
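/* Initialize the NIC client instance on the vport. The reset counter is
 * sampled before init_instance(); if a reset starts or completes while the
 * client is initializing, the registration is rolled back with -EBUSY.
 */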
8366 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
8367                                           struct hclge_vport *vport)
8368 {
8369         struct hnae3_client *client = vport->nic.client;
8370         struct hclge_dev *hdev = ae_dev->priv;
8371         int rst_cnt;
8372         int ret;
8373
8374         rst_cnt = hdev->rst_stats.reset_cnt;
8375         ret = client->ops->init_instance(&vport->nic);
8376         if (ret)
8377                 return ret;
8378
8379         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8380         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8381             rst_cnt != hdev->rst_stats.reset_cnt) {
8382                 ret = -EBUSY;
8383                 goto init_nic_err;
8384         }
8385
8386         /* Enable nic hw error interrupts */
8387         ret = hclge_config_nic_hw_error(hdev, true);
8388         if (ret) {
8389                 dev_err(&ae_dev->pdev->dev,
8390                         "fail(%d) to enable hw error interrupts\n", ret);
8391                 goto init_nic_err;
8392         }
8393
8394         hnae3_set_client_init_flag(client, ae_dev, 1);
8395
8396         if (netif_msg_drv(&hdev->vport->nic))
8397                 hclge_info_show(hdev);
8398
8399         return ret;
8400
8401 init_nic_err:
8402         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8403         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8404                 msleep(HCLGE_WAIT_RESET_DONE);
8405
8406         client->ops->uninit_instance(&vport->nic, 0);
8407
8408         return ret;
8409 }
8410
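/* Initialize the RoCE client instance on the vport. This is a no-op until
 * the device supports RoCE and both the NIC and RoCE clients are
 * registered; the same reset-race rollback as the NIC init applies.
 */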
8411 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
8412                                            struct hclge_vport *vport)
8413 {
8414         struct hnae3_client *client = vport->roce.client;
8415         struct hclge_dev *hdev = ae_dev->priv;
8416         int rst_cnt;
8417         int ret;
8418
8419         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
8420             !hdev->nic_client)
8421                 return 0;
8422
8423         client = hdev->roce_client;
8424         ret = hclge_init_roce_base_info(vport);
8425         if (ret)
8426                 return ret;
8427
8428         rst_cnt = hdev->rst_stats.reset_cnt;
8429         ret = client->ops->init_instance(&vport->roce);
8430         if (ret)
8431                 return ret;
8432
8433         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8434         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
8435             rst_cnt != hdev->rst_stats.reset_cnt) {
8436                 ret = -EBUSY;
8437                 goto init_roce_err;
8438         }
8439
8440         /* Enable roce ras interrupts */
8441         ret = hclge_config_rocee_ras_interrupt(hdev, true);
8442         if (ret) {
8443                 dev_err(&ae_dev->pdev->dev,
8444                         "fail(%d) to enable roce ras interrupts\n", ret);
8445                 goto init_roce_err;
8446         }
8447
8448         hnae3_set_client_init_flag(client, ae_dev, 1);
8449
8450         return 0;
8451
8452 init_roce_err:
8453         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8454         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8455                 msleep(HCLGE_WAIT_RESET_DONE);
8456
8457         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
8458
8459         return ret;
8460 }
8461
8462 static int hclge_init_client_instance(struct hnae3_client *client,
8463                                       struct hnae3_ae_dev *ae_dev)
8464 {
8465         struct hclge_dev *hdev = ae_dev->priv;
8466         struct hclge_vport *vport;
8467         int i, ret;
8468
8469         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8470                 vport = &hdev->vport[i];
8471
8472                 switch (client->type) {
8473                 case HNAE3_CLIENT_KNIC:
8474
8475                         hdev->nic_client = client;
8476                         vport->nic.client = client;
8477                         ret = hclge_init_nic_client_instance(ae_dev, vport);
8478                         if (ret)
8479                                 goto clear_nic;
8480
8481                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8482                         if (ret)
8483                                 goto clear_roce;
8484
8485                         break;
8486                 case HNAE3_CLIENT_ROCE:
8487                         if (hnae3_dev_roce_supported(hdev)) {
8488                                 hdev->roce_client = client;
8489                                 vport->roce.client = client;
8490                         }
8491
8492                         ret = hclge_init_roce_client_instance(ae_dev, vport);
8493                         if (ret)
8494                                 goto clear_roce;
8495
8496                         break;
8497                 default:
8498                         return -EINVAL;
8499                 }
8500         }
8501
8502         return ret;
8503
8504 clear_nic:
8505         hdev->nic_client = NULL;
8506         vport->nic.client = NULL;
8507         return ret;
8508 clear_roce:
8509         hdev->roce_client = NULL;
8510         vport->roce.client = NULL;
8511         return ret;
8512 }
8513
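/* Tear down the client instances on every vport: the RoCE client is
 * uninitialized first, then the NIC client, each after waiting for any
 * reset handling in progress to finish.
 */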
8514 static void hclge_uninit_client_instance(struct hnae3_client *client,
8515                                          struct hnae3_ae_dev *ae_dev)
8516 {
8517         struct hclge_dev *hdev = ae_dev->priv;
8518         struct hclge_vport *vport;
8519         int i;
8520
8521         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
8522                 vport = &hdev->vport[i];
8523                 if (hdev->roce_client) {
8524                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
8525                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8526                                 msleep(HCLGE_WAIT_RESET_DONE);
8527
8528                         hdev->roce_client->ops->uninit_instance(&vport->roce,
8529                                                                 0);
8530                         hdev->roce_client = NULL;
8531                         vport->roce.client = NULL;
8532                 }
8533                 if (client->type == HNAE3_CLIENT_ROCE)
8534                         return;
8535                 if (hdev->nic_client && client->ops->uninit_instance) {
8536                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
8537                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
8538                                 msleep(HCLGE_WAIT_RESET_DONE);
8539
8540                         client->ops->uninit_instance(&vport->nic, 0);
8541                         hdev->nic_client = NULL;
8542                         vport->nic.client = NULL;
8543                 }
8544         }
8545 }
8546
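/* Enable the PCI device, set a 64-bit DMA mask (falling back to 32-bit),
 * claim the regions and map BAR2 as the register space.
 */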
8547 static int hclge_pci_init(struct hclge_dev *hdev)
8548 {
8549         struct pci_dev *pdev = hdev->pdev;
8550         struct hclge_hw *hw;
8551         int ret;
8552
8553         ret = pci_enable_device(pdev);
8554         if (ret) {
8555                 dev_err(&pdev->dev, "failed to enable PCI device\n");
8556                 return ret;
8557         }
8558
8559         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8560         if (ret) {
8561                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8562                 if (ret) {
8563                         dev_err(&pdev->dev,
8564                                 "can't set consistent PCI DMA\n");
8565                         goto err_disable_device;
8566                 }
8567                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8568         }
8569
8570         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8571         if (ret) {
8572                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8573                 goto err_disable_device;
8574         }
8575
8576         pci_set_master(pdev);
8577         hw = &hdev->hw;
8578         hw->io_base = pcim_iomap(pdev, 2, 0);
8579         if (!hw->io_base) {
8580                 dev_err(&pdev->dev, "Can't map configuration register space\n");
8581                 ret = -ENOMEM;
8582                 goto err_clr_master;
8583         }
8584
8585         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8586
8587         return 0;
8588 err_clr_master:
8589         pci_clear_master(pdev);
8590         pci_release_regions(pdev);
8591 err_disable_device:
8592         pci_disable_device(pdev);
8593
8594         return ret;
8595 }
8596
8597 static void hclge_pci_uninit(struct hclge_dev *hdev)
8598 {
8599         struct pci_dev *pdev = hdev->pdev;
8600
8601         pcim_iounmap(pdev, hdev->hw.io_base);
8602         pci_free_irq_vectors(pdev);
8603         pci_clear_master(pdev);
8604         pci_release_mem_regions(pdev);
8605         pci_disable_device(pdev);
8606 }
8607
8608 static void hclge_state_init(struct hclge_dev *hdev)
8609 {
8610         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8611         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8612         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8613         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8614         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8615         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8616 }
8617
8618 static void hclge_state_uninit(struct hclge_dev *hdev)
8619 {
8620         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8621         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
8622
8623         if (hdev->reset_timer.function)
8624                 del_timer_sync(&hdev->reset_timer);
8625         if (hdev->service_task.work.func)
8626                 cancel_delayed_work_sync(&hdev->service_task);
8627         if (hdev->rst_service_task.func)
8628                 cancel_work_sync(&hdev->rst_service_task);
8629         if (hdev->mbx_service_task.func)
8630                 cancel_work_sync(&hdev->mbx_service_task);
8631 }
8632
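/* Prepare for an FLR: request a function reset and poll the FLR_DOWN flag
 * for up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS (5 seconds).
 */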
8633 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8634 {
8635 #define HCLGE_FLR_WAIT_MS       100
8636 #define HCLGE_FLR_WAIT_CNT      50
8637         struct hclge_dev *hdev = ae_dev->priv;
8638         int cnt = 0;
8639
8640         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8641         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8642         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8643         hclge_reset_event(hdev->pdev, NULL);
8644
8645         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8646                cnt++ < HCLGE_FLR_WAIT_CNT)
8647                 msleep(HCLGE_FLR_WAIT_MS);
8648
8649         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8650                 dev_err(&hdev->pdev->dev,
8651                         "flr wait down timeout: %d\n", cnt);
8652 }
8653
8654 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8655 {
8656         struct hclge_dev *hdev = ae_dev->priv;
8657
8658         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8659 }
8660
8661 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
8662 {
8663         u16 i;
8664
8665         for (i = 0; i < hdev->num_alloc_vport; i++) {
8666                 struct hclge_vport *vport = &hdev->vport[i];
8667                 int ret;
8668
8669                 /* Send cmd to clear VF's FUNC_RST_ING */
8670                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
8671                 if (ret)
8672                         dev_warn(&hdev->pdev->dev,
8673                                  "clear vf(%d) rst failed %d!\n",
8674                                  vport->vport_id, ret);
8675         }
8676 }
8677
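/* PF initialization: bring up PCI, the command queue, MSI/MSI-X and the
 * misc vector, allocate TQPs and vports, configure MAC/VLAN/TM/RSS/FD,
 * then set up the service tasks and the misc interrupt affinity.
 */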
8678 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8679 {
8680         struct pci_dev *pdev = ae_dev->pdev;
8681         struct hclge_dev *hdev;
8682         int ret;
8683
8684         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8685         if (!hdev) {
8686                 ret = -ENOMEM;
8687                 goto out;
8688         }
8689
8690         hdev->pdev = pdev;
8691         hdev->ae_dev = ae_dev;
8692         hdev->reset_type = HNAE3_NONE_RESET;
8693         hdev->reset_level = HNAE3_FUNC_RESET;
8694         ae_dev->priv = hdev;
8695         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8696
8697         mutex_init(&hdev->vport_lock);
8698         mutex_init(&hdev->vport_cfg_mutex);
8699         spin_lock_init(&hdev->fd_rule_lock);
8700
8701         ret = hclge_pci_init(hdev);
8702         if (ret) {
8703                 dev_err(&pdev->dev, "PCI init failed\n");
8704                 goto out;
8705         }
8706
8707         /* Firmware command queue initialization */
8708         ret = hclge_cmd_queue_init(hdev);
8709         if (ret) {
8710                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8711                 goto err_pci_uninit;
8712         }
8713
8714         /* Firmware command initialization */
8715         ret = hclge_cmd_init(hdev);
8716         if (ret)
8717                 goto err_cmd_uninit;
8718
8719         ret = hclge_get_cap(hdev);
8720         if (ret) {
8721                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8722                         ret);
8723                 goto err_cmd_uninit;
8724         }
8725
8726         ret = hclge_configure(hdev);
8727         if (ret) {
8728                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8729                 goto err_cmd_uninit;
8730         }
8731
8732         ret = hclge_init_msi(hdev);
8733         if (ret) {
8734                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8735                 goto err_cmd_uninit;
8736         }
8737
8738         ret = hclge_misc_irq_init(hdev);
8739         if (ret) {
8740                 dev_err(&pdev->dev,
8741                         "Misc IRQ(vector0) init error, ret = %d.\n",
8742                         ret);
8743                 goto err_msi_uninit;
8744         }
8745
8746         ret = hclge_alloc_tqps(hdev);
8747         if (ret) {
8748                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8749                 goto err_msi_irq_uninit;
8750         }
8751
8752         ret = hclge_alloc_vport(hdev);
8753         if (ret) {
8754                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8755                 goto err_msi_irq_uninit;
8756         }
8757
8758         ret = hclge_map_tqp(hdev);
8759         if (ret) {
8760                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8761                 goto err_msi_irq_uninit;
8762         }
8763
8764         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8765                 ret = hclge_mac_mdio_config(hdev);
8766                 if (ret) {
8767                         dev_err(&hdev->pdev->dev,
8768                                 "mdio config fail ret=%d\n", ret);
8769                         goto err_msi_irq_uninit;
8770                 }
8771         }
8772
8773         ret = hclge_init_umv_space(hdev);
8774         if (ret) {
8775                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8776                 goto err_mdiobus_unreg;
8777         }
8778
8779         ret = hclge_mac_init(hdev);
8780         if (ret) {
8781                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8782                 goto err_mdiobus_unreg;
8783         }
8784
8785         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8786         if (ret) {
8787                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8788                 goto err_mdiobus_unreg;
8789         }
8790
8791         ret = hclge_config_gro(hdev, true);
8792         if (ret)
8793                 goto err_mdiobus_unreg;
8794
8795         ret = hclge_init_vlan_config(hdev);
8796         if (ret) {
8797                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8798                 goto err_mdiobus_unreg;
8799         }
8800
8801         ret = hclge_tm_schd_init(hdev);
8802         if (ret) {
8803                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8804                 goto err_mdiobus_unreg;
8805         }
8806
8807         hclge_rss_init_cfg(hdev);
8808         ret = hclge_rss_init_hw(hdev);
8809         if (ret) {
8810                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8811                 goto err_mdiobus_unreg;
8812         }
8813
8814         ret = init_mgr_tbl(hdev);
8815         if (ret) {
8816                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8817                 goto err_mdiobus_unreg;
8818         }
8819
8820         ret = hclge_init_fd_config(hdev);
8821         if (ret) {
8822                 dev_err(&pdev->dev,
8823                         "fd table init fail, ret=%d\n", ret);
8824                 goto err_mdiobus_unreg;
8825         }
8826
8827         INIT_KFIFO(hdev->mac_tnl_log);
8828
8829         hclge_dcb_ops_set(hdev);
8830
8831         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8832         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
8833         INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8834         INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8835
8836         /* Set up the affinity after the service timer is set up, because
8837          * add_timer_on() is called from the affinity notify callback.
8838          */
8839         hclge_misc_affinity_setup(hdev);
8840
8841         hclge_clear_all_event_cause(hdev);
8842         hclge_clear_resetting_state(hdev);
8843
8844         /* Log and clear the hw errors that have already occurred */
8845         hclge_handle_all_hns_hw_errors(ae_dev);
8846
8847         /* Request a delayed reset for error recovery, because an immediate
8848          * global reset on this PF would affect other PFs' pending initialization.
8849          */
8850         if (ae_dev->hw_err_reset_req) {
8851                 enum hnae3_reset_type reset_level;
8852
8853                 reset_level = hclge_get_reset_level(ae_dev,
8854                                                     &ae_dev->hw_err_reset_req);
8855                 hclge_set_def_reset_request(ae_dev, reset_level);
8856                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
8857         }
8858
8859         /* Enable MISC vector(vector0) */
8860         hclge_enable_vector(&hdev->misc_vector, true);
8861
8862         hclge_state_init(hdev);
8863         hdev->last_reset_time = jiffies;
8864
8865         pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8866         return 0;
8867
8868 err_mdiobus_unreg:
8869         if (hdev->hw.mac.phydev)
8870                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8871 err_msi_irq_uninit:
8872         hclge_misc_irq_uninit(hdev);
8873 err_msi_uninit:
8874         pci_free_irq_vectors(pdev);
8875 err_cmd_uninit:
8876         hclge_cmd_uninit(hdev);
8877 err_pci_uninit:
8878         pcim_iounmap(pdev, hdev->hw.io_base);
8879         pci_clear_master(pdev);
8880         pci_release_regions(pdev);
8881         pci_disable_device(pdev);
8882 out:
8883         return ret;
8884 }
8885
8886 static void hclge_stats_clear(struct hclge_dev *hdev)
8887 {
8888         memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8889 }
8890
8891 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8892 {
8893         struct hclge_vport *vport = hdev->vport;
8894         int i;
8895
8896         for (i = 0; i < hdev->num_alloc_vport; i++) {
8897                 hclge_vport_stop(vport);
8898                 vport++;
8899         }
8900 }
8901
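/* Re-initialize the hardware after a reset: the command queue, TQP
 * mapping, MAC, VLAN, TM, RSS and FD tables are reprogrammed without
 * re-allocating the software resources set up at probe time.
 */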
8902 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8903 {
8904         struct hclge_dev *hdev = ae_dev->priv;
8905         struct pci_dev *pdev = ae_dev->pdev;
8906         int ret;
8907
8908         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8909
8910         hclge_stats_clear(hdev);
8911         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8912         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
8913
8914         ret = hclge_cmd_init(hdev);
8915         if (ret) {
8916                 dev_err(&pdev->dev, "Cmd queue init failed\n");
8917                 return ret;
8918         }
8919
8920         ret = hclge_map_tqp(hdev);
8921         if (ret) {
8922                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8923                 return ret;
8924         }
8925
8926         hclge_reset_umv_space(hdev);
8927
8928         ret = hclge_mac_init(hdev);
8929         if (ret) {
8930                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8931                 return ret;
8932         }
8933
8934         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8935         if (ret) {
8936                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8937                 return ret;
8938         }
8939
8940         ret = hclge_config_gro(hdev, true);
8941         if (ret)
8942                 return ret;
8943
8944         ret = hclge_init_vlan_config(hdev);
8945         if (ret) {
8946                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8947                 return ret;
8948         }
8949
8950         ret = hclge_tm_init_hw(hdev, true);
8951         if (ret) {
8952                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8953                 return ret;
8954         }
8955
8956         ret = hclge_rss_init_hw(hdev);
8957         if (ret) {
8958                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8959                 return ret;
8960         }
8961
8962         ret = hclge_init_fd_config(hdev);
8963         if (ret) {
8964                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
8965                 return ret;
8966         }
8967
8968         /* Re-enable the hw error interrupts because
8969          * the interrupts get disabled on global reset.
8970          */
8971         ret = hclge_config_nic_hw_error(hdev, true);
8972         if (ret) {
8973                 dev_err(&pdev->dev,
8974                         "fail(%d) to re-enable NIC hw error interrupts\n",
8975                         ret);
8976                 return ret;
8977         }
8978
8979         if (hdev->roce_client) {
8980                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
8981                 if (ret) {
8982                         dev_err(&pdev->dev,
8983                                 "fail(%d) to re-enable roce ras interrupts\n",
8984                                 ret);
8985                         return ret;
8986                 }
8987         }
8988
8989         hclge_reset_vport_state(hdev);
8990
8991         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8992                  HCLGE_DRIVER_NAME);
8993
8994         return 0;
8995 }
8996
8997 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8998 {
8999         struct hclge_dev *hdev = ae_dev->priv;
9000         struct hclge_mac *mac = &hdev->hw.mac;
9001
9002         hclge_misc_affinity_teardown(hdev);
9003         hclge_state_uninit(hdev);
9004
9005         if (mac->phydev)
9006                 mdiobus_unregister(mac->mdio_bus);
9007
9008         hclge_uninit_umv_space(hdev);
9009
9010         /* Disable MISC vector(vector0) */
9011         hclge_enable_vector(&hdev->misc_vector, false);
9012         synchronize_irq(hdev->misc_vector.vector_irq);
9013
9014         /* Disable all hw interrupts */
9015         hclge_config_mac_tnl_int(hdev, false);
9016         hclge_config_nic_hw_error(hdev, false);
9017         hclge_config_rocee_ras_interrupt(hdev, false);
9018
9019         hclge_cmd_uninit(hdev);
9020         hclge_misc_irq_uninit(hdev);
9021         hclge_pci_uninit(hdev);
9022         mutex_destroy(&hdev->vport_lock);
9023         hclge_uninit_vport_mac_table(hdev);
9024         hclge_uninit_vport_vlan_table(hdev);
9025         mutex_destroy(&hdev->vport_cfg_mutex);
9026         ae_dev->priv = NULL;
9027 }
9028
9029 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9030 {
9031         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9032         struct hclge_vport *vport = hclge_get_vport(handle);
9033         struct hclge_dev *hdev = vport->back;
9034
9035         return min_t(u32, hdev->rss_size_max,
9036                      vport->alloc_tqps / kinfo->num_tc);
9037 }
9038
9039 static void hclge_get_channels(struct hnae3_handle *handle,
9040                                struct ethtool_channels *ch)
9041 {
9042         ch->max_combined = hclge_get_max_channels(handle);
9043         ch->other_count = 1;
9044         ch->max_other = 1;
9045         ch->combined_count = handle->kinfo.rss_size;
9046 }
9047
9048 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9049                                         u16 *alloc_tqps, u16 *max_rss_size)
9050 {
9051         struct hclge_vport *vport = hclge_get_vport(handle);
9052         struct hclge_dev *hdev = vport->back;
9053
9054         *alloc_tqps = vport->alloc_tqps;
9055         *max_rss_size = hdev->rss_size_max;
9056 }
9057
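/* Change the number of channels: update the vport TQP mapping for the
 * requested queue count, reprogram the RSS TC mode, and rebuild the RSS
 * indirection table unless the user has already configured it.
 */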
9058 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9059                               bool rxfh_configured)
9060 {
9061         struct hclge_vport *vport = hclge_get_vport(handle);
9062         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9063         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9064         struct hclge_dev *hdev = vport->back;
9065         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9066         int cur_rss_size = kinfo->rss_size;
9067         int cur_tqps = kinfo->num_tqps;
9068         u16 tc_valid[HCLGE_MAX_TC_NUM];
9069         u16 roundup_size;
9070         u32 *rss_indir;
9071         unsigned int i;
9072         int ret;
9073
9074         kinfo->req_rss_size = new_tqps_num;
9075
9076         ret = hclge_tm_vport_map_update(hdev);
9077         if (ret) {
9078                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9079                 return ret;
9080         }
9081
9082         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9083         roundup_size = ilog2(roundup_size);
9084         /* Set the RSS TC mode according to the new RSS size */
9085         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
9086                 tc_valid[i] = 0;
9087
9088                 if (!(hdev->hw_tc_map & BIT(i)))
9089                         continue;
9090
9091                 tc_valid[i] = 1;
9092                 tc_size[i] = roundup_size;
9093                 tc_offset[i] = kinfo->rss_size * i;
9094         }
9095         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
9096         if (ret)
9097                 return ret;
9098
9099         /* RSS indirection table has been configured by user */
9100         if (rxfh_configured)
9101                 goto out;
9102
9103         /* Reinitialize the RSS indirection table according to the new RSS size */
9104         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
9105         if (!rss_indir)
9106                 return -ENOMEM;
9107
9108         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
9109                 rss_indir[i] = i % kinfo->rss_size;
9110
9111         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
9112         if (ret)
9113                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
9114                         ret);
9115
9116         kfree(rss_indir);
9117
9118 out:
9119         if (!ret)
9120                 dev_info(&hdev->pdev->dev,
9121                          "Channels changed, rss_size from %d to %d, tqps from %d to %d\n",
9122                          cur_rss_size, kinfo->rss_size,
9123                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
9124
9125         return ret;
9126 }
9127
9128 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
9129                               u32 *regs_num_64_bit)
9130 {
9131         struct hclge_desc desc;
9132         u32 total_num;
9133         int ret;
9134
9135         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
9136         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9137         if (ret) {
9138                 dev_err(&hdev->pdev->dev,
9139                         "Query register number cmd failed, ret = %d.\n", ret);
9140                 return ret;
9141         }
9142
9143         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
9144         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
9145
9146         total_num = *regs_num_32_bit + *regs_num_64_bit;
9147         if (!total_num)
9148                 return -EINVAL;
9149
9150         return 0;
9151 }
9152
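/* Read the 32-bit registers through the command queue. In the first
 * descriptor HCLGE_32_BIT_DESC_NODATA_LEN words are reserved for the
 * command header, so it carries fewer register values; every following
 * descriptor is consumed entirely as register data.
 */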
9153 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9154                                  void *data)
9155 {
9156 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
9157 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
9158
9159         struct hclge_desc *desc;
9160         u32 *reg_val = data;
9161         __le32 *desc_data;
9162         int nodata_num;
9163         int cmd_num;
9164         int i, k, n;
9165         int ret;
9166
9167         if (regs_num == 0)
9168                 return 0;
9169
9170         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
9171         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
9172                                HCLGE_32_BIT_REG_RTN_DATANUM);
9173         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9174         if (!desc)
9175                 return -ENOMEM;
9176
9177         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
9178         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9179         if (ret) {
9180                 dev_err(&hdev->pdev->dev,
9181                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
9182                 kfree(desc);
9183                 return ret;
9184         }
9185
9186         for (i = 0; i < cmd_num; i++) {
9187                 if (i == 0) {
9188                         desc_data = (__le32 *)(&desc[i].data[0]);
9189                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
9190                 } else {
9191                         desc_data = (__le32 *)(&desc[i]);
9192                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
9193                 }
9194                 for (k = 0; k < n; k++) {
9195                         *reg_val++ = le32_to_cpu(*desc_data++);
9196
9197                         regs_num--;
9198                         if (!regs_num)
9199                                 break;
9200                 }
9201         }
9202
9203         kfree(desc);
9204         return 0;
9205 }
9206
9207 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
9208                                  void *data)
9209 {
9210 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
9211 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
9212
9213         struct hclge_desc *desc;
9214         u64 *reg_val = data;
9215         __le64 *desc_data;
9216         int nodata_len;
9217         int cmd_num;
9218         int i, k, n;
9219         int ret;
9220
9221         if (regs_num == 0)
9222                 return 0;
9223
9224         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
9225         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
9226                                HCLGE_64_BIT_REG_RTN_DATANUM);
9227         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
9228         if (!desc)
9229                 return -ENOMEM;
9230
9231         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
9232         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
9233         if (ret) {
9234                 dev_err(&hdev->pdev->dev,
9235                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
9236                 kfree(desc);
9237                 return ret;
9238         }
9239
9240         for (i = 0; i < cmd_num; i++) {
9241                 if (i == 0) {
9242                         desc_data = (__le64 *)(&desc[i].data[0]);
9243                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
9244                 } else {
9245                         desc_data = (__le64 *)(&desc[i]);
9246                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
9247                 }
9248                 for (k = 0; k < n; k++) {
9249                         *reg_val++ = le64_to_cpu(*desc_data++);
9250
9251                         regs_num--;
9252                         if (!regs_num)
9253                                 break;
9254                 }
9255         }
9256
9257         kfree(desc);
9258         return 0;
9259 }
9260
9261 #define MAX_SEPARATE_NUM        4
9262 #define SEPARATOR_VALUE         0xFFFFFFFF
9263 #define REG_NUM_PER_LINE        4
9264 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
9265
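/* The register dump length is the directly-read groups (cmdq, common,
 * per-TQP ring and per-vector interrupt registers), each padded to a
 * multiple of REG_LEN_PER_LINE, plus the firmware-reported 32-bit and
 * 64-bit register sets.
 */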
9266 static int hclge_get_regs_len(struct hnae3_handle *handle)
9267 {
9268         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
9269         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9270         struct hclge_vport *vport = hclge_get_vport(handle);
9271         struct hclge_dev *hdev = vport->back;
9272         u32 regs_num_32_bit, regs_num_64_bit;
9273         int ret;
9274
9275         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9276         if (ret) {
9277                 dev_err(&hdev->pdev->dev,
9278                         "Get register number failed, ret = %d.\n", ret);
9279                 return -EOPNOTSUPP;
9280         }
9281
9282         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
9283         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
9284         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
9285         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
9286
9287         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
9288                 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
9289                 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
9290 }
9291
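/* Dump the registers in the same order used by hclge_get_regs_len():
 * direct-read groups padded with SEPARATOR_VALUE, then the 32-bit and
 * 64-bit register values queried from firmware.
 */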
9292 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
9293                            void *data)
9294 {
9295         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9296         struct hclge_vport *vport = hclge_get_vport(handle);
9297         struct hclge_dev *hdev = vport->back;
9298         u32 regs_num_32_bit, regs_num_64_bit;
9299         int i, j, reg_num, separator_num;
9300         u32 *reg = data;
9301         int ret;
9302
9303         *version = hdev->fw_version;
9304
9305         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
9306         if (ret) {
9307                 dev_err(&hdev->pdev->dev,
9308                         "Get register number failed, ret = %d.\n", ret);
9309                 return;
9310         }
9311
9312         /* fetch per-PF register values from the PF PCIe register space */
9313         reg_num = sizeof(cmdq_reg_addr_list) / sizeof(u32);
9314         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9315         for (i = 0; i < reg_num; i++)
9316                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
9317         for (i = 0; i < separator_num; i++)
9318                 *reg++ = SEPARATOR_VALUE;
9319
9320         reg_num = sizeof(common_reg_addr_list) / sizeof(u32);
9321         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9322         for (i = 0; i < reg_num; i++)
9323                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
9324         for (i = 0; i < separator_num; i++)
9325                 *reg++ = SEPARATOR_VALUE;
9326
9327         reg_num = sizeof(ring_reg_addr_list) / sizeof(u32);
9328         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9329         for (j = 0; j < kinfo->num_tqps; j++) {
9330                 for (i = 0; i < reg_num; i++)
9331                         *reg++ = hclge_read_dev(&hdev->hw,
9332                                                 ring_reg_addr_list[i] +
9333                                                 0x200 * j);
9334                 for (i = 0; i < separator_num; i++)
9335                         *reg++ = SEPARATOR_VALUE;
9336         }
9337
9338         reg_num = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
9339         separator_num = MAX_SEPARATE_NUM - reg_num % REG_NUM_PER_LINE;
9340         for (j = 0; j < hdev->num_msi_used - 1; j++) {
9341                 for (i = 0; i < reg_num; i++)
9342                         *reg++ = hclge_read_dev(&hdev->hw,
9343                                                 tqp_intr_reg_addr_list[i] +
9344                                                 4 * j);
9345                 for (i = 0; i < separator_num; i++)
9346                         *reg++ = SEPARATOR_VALUE;
9347         }
9348
9349         /* fetch PF common register values from firmware */
9350         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
9351         if (ret) {
9352                 dev_err(&hdev->pdev->dev,
9353                         "Get 32 bit register failed, ret = %d.\n", ret);
9354                 return;
9355         }
9356
9357         reg += regs_num_32_bit;
9358         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
9359         if (ret)
9360                 dev_err(&hdev->pdev->dev,
9361                         "Get 64 bit register failed, ret = %d.\n", ret);
9362 }
9363
9364 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
9365 {
9366         struct hclge_set_led_state_cmd *req;
9367         struct hclge_desc desc;
9368         int ret;
9369
9370         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
9371
9372         req = (struct hclge_set_led_state_cmd *)desc.data;
9373         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
9374                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
9375
9376         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9377         if (ret)
9378                 dev_err(&hdev->pdev->dev,
9379                         "Send set led state cmd error, ret =%d\n", ret);
9380
9381         return ret;
9382 }
9383
9384 enum hclge_led_status {
9385         HCLGE_LED_OFF,
9386         HCLGE_LED_ON,
9387         HCLGE_LED_NO_CHANGE = 0xFF,
9388 };
9389
9390 static int hclge_set_led_id(struct hnae3_handle *handle,
9391                             enum ethtool_phys_id_state status)
9392 {
9393         struct hclge_vport *vport = hclge_get_vport(handle);
9394         struct hclge_dev *hdev = vport->back;
9395
9396         switch (status) {
9397         case ETHTOOL_ID_ACTIVE:
9398                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
9399         case ETHTOOL_ID_INACTIVE:
9400                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
9401         default:
9402                 return -EINVAL;
9403         }
9404 }
9405
9406 static void hclge_get_link_mode(struct hnae3_handle *handle,
9407                                 unsigned long *supported,
9408                                 unsigned long *advertising)
9409 {
9410         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
9411         struct hclge_vport *vport = hclge_get_vport(handle);
9412         struct hclge_dev *hdev = vport->back;
9413         unsigned int idx = 0;
9414
9415         for (; idx < size; idx++) {
9416                 supported[idx] = hdev->hw.mac.supported[idx];
9417                 advertising[idx] = hdev->hw.mac.advertising[idx];
9418         }
9419 }
9420
9421 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
9422 {
9423         struct hclge_vport *vport = hclge_get_vport(handle);
9424         struct hclge_dev *hdev = vport->back;
9425
9426         return hclge_config_gro(hdev, enable);
9427 }
9428
9429 static const struct hnae3_ae_ops hclge_ops = {
9430         .init_ae_dev = hclge_init_ae_dev,
9431         .uninit_ae_dev = hclge_uninit_ae_dev,
9432         .flr_prepare = hclge_flr_prepare,
9433         .flr_done = hclge_flr_done,
9434         .init_client_instance = hclge_init_client_instance,
9435         .uninit_client_instance = hclge_uninit_client_instance,
9436         .map_ring_to_vector = hclge_map_ring_to_vector,
9437         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
9438         .get_vector = hclge_get_vector,
9439         .put_vector = hclge_put_vector,
9440         .set_promisc_mode = hclge_set_promisc_mode,
9441         .set_loopback = hclge_set_loopback,
9442         .start = hclge_ae_start,
9443         .stop = hclge_ae_stop,
9444         .client_start = hclge_client_start,
9445         .client_stop = hclge_client_stop,
9446         .get_status = hclge_get_status,
9447         .get_ksettings_an_result = hclge_get_ksettings_an_result,
9448         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
9449         .get_media_type = hclge_get_media_type,
9450         .check_port_speed = hclge_check_port_speed,
9451         .get_fec = hclge_get_fec,
9452         .set_fec = hclge_set_fec,
9453         .get_rss_key_size = hclge_get_rss_key_size,
9454         .get_rss_indir_size = hclge_get_rss_indir_size,
9455         .get_rss = hclge_get_rss,
9456         .set_rss = hclge_set_rss,
9457         .set_rss_tuple = hclge_set_rss_tuple,
9458         .get_rss_tuple = hclge_get_rss_tuple,
9459         .get_tc_size = hclge_get_tc_size,
9460         .get_mac_addr = hclge_get_mac_addr,
9461         .set_mac_addr = hclge_set_mac_addr,
9462         .do_ioctl = hclge_do_ioctl,
9463         .add_uc_addr = hclge_add_uc_addr,
9464         .rm_uc_addr = hclge_rm_uc_addr,
9465         .add_mc_addr = hclge_add_mc_addr,
9466         .rm_mc_addr = hclge_rm_mc_addr,
9467         .set_autoneg = hclge_set_autoneg,
9468         .get_autoneg = hclge_get_autoneg,
9469         .restart_autoneg = hclge_restart_autoneg,
9470         .halt_autoneg = hclge_halt_autoneg,
9471         .get_pauseparam = hclge_get_pauseparam,
9472         .set_pauseparam = hclge_set_pauseparam,
9473         .set_mtu = hclge_set_mtu,
9474         .reset_queue = hclge_reset_tqp,
9475         .get_stats = hclge_get_stats,
9476         .get_mac_pause_stats = hclge_get_mac_pause_stat,
9477         .update_stats = hclge_update_stats,
9478         .get_strings = hclge_get_strings,
9479         .get_sset_count = hclge_get_sset_count,
9480         .get_fw_version = hclge_get_fw_version,
9481         .get_mdix_mode = hclge_get_mdix_mode,
9482         .enable_vlan_filter = hclge_enable_vlan_filter,
9483         .set_vlan_filter = hclge_set_vlan_filter,
9484         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
9485         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
9486         .reset_event = hclge_reset_event,
9487         .get_reset_level = hclge_get_reset_level,
9488         .set_default_reset_request = hclge_set_def_reset_request,
9489         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
9490         .set_channels = hclge_set_channels,
9491         .get_channels = hclge_get_channels,
9492         .get_regs_len = hclge_get_regs_len,
9493         .get_regs = hclge_get_regs,
9494         .set_led_id = hclge_set_led_id,
9495         .get_link_mode = hclge_get_link_mode,
9496         .add_fd_entry = hclge_add_fd_entry,
9497         .del_fd_entry = hclge_del_fd_entry,
9498         .del_all_fd_entries = hclge_del_all_fd_entries,
9499         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
9500         .get_fd_rule_info = hclge_get_fd_rule_info,
9501         .get_fd_all_rules = hclge_get_all_rules,
9502         .restore_fd_rules = hclge_restore_fd_entries,
9503         .enable_fd = hclge_enable_fd,
9504         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
9505         .dbg_run_cmd = hclge_dbg_run_cmd,
9506         .handle_hw_ras_error = hclge_handle_hw_ras_error,
9507         .get_hw_reset_stat = hclge_get_hw_reset_stat,
9508         .ae_dev_resetting = hclge_ae_dev_resetting,
9509         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
9510         .set_gro_en = hclge_gro_en,
9511         .get_global_queue_id = hclge_covert_handle_qid_global,
9512         .set_timer_task = hclge_set_timer_task,
9513         .mac_connect_phy = hclge_mac_connect_phy,
9514         .mac_disconnect_phy = hclge_mac_disconnect_phy,
9515         .restore_vlan_table = hclge_restore_vlan_table,
9516 };
9517
9518 static struct hnae3_ae_algo ae_algo = {
9519         .ops = &hclge_ops,
9520         .pdev_id_table = ae_algo_pci_tbl,
9521 };
9522
9523 static int hclge_init(void)
9524 {
9525         pr_info("%s is initializing\n", HCLGE_NAME);
9526
9527         hnae3_register_ae_algo(&ae_algo);
9528
9529         return 0;
9530 }
9531
9532 static void hclge_exit(void)
9533 {
9534         hnae3_unregister_ae_algo(&ae_algo);
9535 }
9536 module_init(hclge_init);
9537 module_exit(hclge_exit);
9538
9539 MODULE_LICENSE("GPL");
9540 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
9541 MODULE_DESCRIPTION("HCLGE Driver");
9542 MODULE_VERSION(HCLGE_MOD_VERSION);