net: hns3: allocate WQ with WQ_MEM_RECLAIM flag
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
22 #include "hclge_tm.h"
23 #include "hclge_err.h"
24 #include "hnae3.h"
25
26 #define HCLGE_NAME                      "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
29
30 #define HCLGE_BUF_SIZE_UNIT     256U
31 #define HCLGE_BUF_MUL_BY        2
32 #define HCLGE_BUF_DIV_BY        2
33 #define NEED_RESERVE_TC_NUM     2
34 #define BUF_MAX_PERCENT         100
35 #define BUF_RESERVE_PERCENT     90
36
37 #define HCLGE_RESET_MAX_FAIL_CNT        5
38 #define HCLGE_RESET_SYNC_TIME           100
39 #define HCLGE_PF_RESET_SYNC_TIME        20
40 #define HCLGE_PF_RESET_SYNC_CNT         1500
41
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET        1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
46 #define HCLGE_DFX_IGU_BD_OFFSET         4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
49 #define HCLGE_DFX_NCSI_BD_OFFSET        7
50 #define HCLGE_DFX_RTC_BD_OFFSET         8
51 #define HCLGE_DFX_PPP_BD_OFFSET         9
52 #define HCLGE_DFX_RCB_BD_OFFSET         10
53 #define HCLGE_DFX_TQP_BD_OFFSET         11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
55
56 #define HCLGE_LINK_STATUS_MS    10
57
58 #define HCLGE_VF_VPORT_START_NUM        1
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66                                u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
70                                                    unsigned long *addr);
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
72
73 static struct hnae3_ae_algo ae_algo;
74
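/* Driver-wide workqueue for hclge deferred work. Per the patch title above,
 * it is allocated with WQ_MEM_RECLAIM so that queued work can still make
 * progress under memory pressure.
 */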
75 static struct workqueue_struct *hclge_wq;
76
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85         /* required last entry */
86         {0, }
87 };
88
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
90
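/* The register address lists below name the register sets dumped for
 * debugging (assumed to back the driver's register-dump path, e.g. ethtool -d).
 */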
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92                                          HCLGE_CMDQ_TX_ADDR_H_REG,
93                                          HCLGE_CMDQ_TX_DEPTH_REG,
94                                          HCLGE_CMDQ_TX_TAIL_REG,
95                                          HCLGE_CMDQ_TX_HEAD_REG,
96                                          HCLGE_CMDQ_RX_ADDR_L_REG,
97                                          HCLGE_CMDQ_RX_ADDR_H_REG,
98                                          HCLGE_CMDQ_RX_DEPTH_REG,
99                                          HCLGE_CMDQ_RX_TAIL_REG,
100                                          HCLGE_CMDQ_RX_HEAD_REG,
101                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
102                                          HCLGE_CMDQ_INTR_STS_REG,
103                                          HCLGE_CMDQ_INTR_EN_REG,
104                                          HCLGE_CMDQ_INTR_GEN_REG};
105
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107                                            HCLGE_VECTOR0_OTER_EN_REG,
108                                            HCLGE_MISC_RESET_STS_REG,
109                                            HCLGE_MISC_VECTOR_INT_STS,
110                                            HCLGE_GLOBAL_RESET_REG,
111                                            HCLGE_FUN_RST_ING,
112                                            HCLGE_GRO_EN_REG};
113
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115                                          HCLGE_RING_RX_ADDR_H_REG,
116                                          HCLGE_RING_RX_BD_NUM_REG,
117                                          HCLGE_RING_RX_BD_LENGTH_REG,
118                                          HCLGE_RING_RX_MERGE_EN_REG,
119                                          HCLGE_RING_RX_TAIL_REG,
120                                          HCLGE_RING_RX_HEAD_REG,
121                                          HCLGE_RING_RX_FBD_NUM_REG,
122                                          HCLGE_RING_RX_OFFSET_REG,
123                                          HCLGE_RING_RX_FBD_OFFSET_REG,
124                                          HCLGE_RING_RX_STASH_REG,
125                                          HCLGE_RING_RX_BD_ERR_REG,
126                                          HCLGE_RING_TX_ADDR_L_REG,
127                                          HCLGE_RING_TX_ADDR_H_REG,
128                                          HCLGE_RING_TX_BD_NUM_REG,
129                                          HCLGE_RING_TX_PRIORITY_REG,
130                                          HCLGE_RING_TX_TC_REG,
131                                          HCLGE_RING_TX_MERGE_EN_REG,
132                                          HCLGE_RING_TX_TAIL_REG,
133                                          HCLGE_RING_TX_HEAD_REG,
134                                          HCLGE_RING_TX_FBD_NUM_REG,
135                                          HCLGE_RING_TX_OFFSET_REG,
136                                          HCLGE_RING_TX_EBD_NUM_REG,
137                                          HCLGE_RING_TX_EBD_OFFSET_REG,
138                                          HCLGE_RING_TX_BD_ERR_REG,
139                                          HCLGE_RING_EN_REG};
140
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142                                              HCLGE_TQP_INTR_GL0_REG,
143                                              HCLGE_TQP_INTR_GL1_REG,
144                                              HCLGE_TQP_INTR_GL2_REG,
145                                              HCLGE_TQP_INTR_RL_REG};
146
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
148         "App    Loopback test",
149         "Serdes serial Loopback test",
150         "Serdes parallel Loopback test",
151         "Phy    Loopback test"
152 };
153
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155         {"mac_tx_mac_pause_num",
156                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157         {"mac_rx_mac_pause_num",
158                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159         {"mac_tx_control_pkt_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161         {"mac_rx_control_pkt_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163         {"mac_tx_pfc_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165         {"mac_tx_pfc_pri0_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167         {"mac_tx_pfc_pri1_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169         {"mac_tx_pfc_pri2_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171         {"mac_tx_pfc_pri3_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173         {"mac_tx_pfc_pri4_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175         {"mac_tx_pfc_pri5_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177         {"mac_tx_pfc_pri6_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179         {"mac_tx_pfc_pri7_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181         {"mac_rx_pfc_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183         {"mac_rx_pfc_pri0_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185         {"mac_rx_pfc_pri1_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187         {"mac_rx_pfc_pri2_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189         {"mac_rx_pfc_pri3_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191         {"mac_rx_pfc_pri4_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193         {"mac_rx_pfc_pri5_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195         {"mac_rx_pfc_pri6_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197         {"mac_rx_pfc_pri7_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199         {"mac_tx_total_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201         {"mac_tx_total_oct_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203         {"mac_tx_good_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205         {"mac_tx_bad_pkt_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207         {"mac_tx_good_oct_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209         {"mac_tx_bad_oct_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211         {"mac_tx_uni_pkt_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213         {"mac_tx_multi_pkt_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215         {"mac_tx_broad_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217         {"mac_tx_undersize_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219         {"mac_tx_oversize_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221         {"mac_tx_64_oct_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223         {"mac_tx_65_127_oct_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225         {"mac_tx_128_255_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227         {"mac_tx_256_511_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229         {"mac_tx_512_1023_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231         {"mac_tx_1024_1518_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233         {"mac_tx_1519_2047_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235         {"mac_tx_2048_4095_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237         {"mac_tx_4096_8191_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239         {"mac_tx_8192_9216_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241         {"mac_tx_9217_12287_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243         {"mac_tx_12288_16383_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245         {"mac_tx_1519_max_good_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247         {"mac_tx_1519_max_bad_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249         {"mac_rx_total_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251         {"mac_rx_total_oct_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253         {"mac_rx_good_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255         {"mac_rx_bad_pkt_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257         {"mac_rx_good_oct_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259         {"mac_rx_bad_oct_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261         {"mac_rx_uni_pkt_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263         {"mac_rx_multi_pkt_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265         {"mac_rx_broad_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267         {"mac_rx_undersize_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269         {"mac_rx_oversize_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271         {"mac_rx_64_oct_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273         {"mac_rx_65_127_oct_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275         {"mac_rx_128_255_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277         {"mac_rx_256_511_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279         {"mac_rx_512_1023_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281         {"mac_rx_1024_1518_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283         {"mac_rx_1519_2047_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285         {"mac_rx_2048_4095_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287         {"mac_rx_4096_8191_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289         {"mac_rx_8192_9216_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291         {"mac_rx_9217_12287_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293         {"mac_rx_12288_16383_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295         {"mac_rx_1519_max_good_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297         {"mac_rx_1519_max_bad_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
299
300         {"mac_tx_fragment_pkt_num",
301                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302         {"mac_tx_undermin_pkt_num",
303                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304         {"mac_tx_jabber_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306         {"mac_tx_err_all_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308         {"mac_tx_from_app_good_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310         {"mac_tx_from_app_bad_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312         {"mac_rx_fragment_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314         {"mac_rx_undermin_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316         {"mac_rx_jabber_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318         {"mac_rx_fcs_err_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320         {"mac_rx_send_app_good_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322         {"mac_rx_send_app_bad_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
324 };
325
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
327         {
328                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331                 .i_port_bitmap = 0x1,
332         },
333 };
334
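/* Default RSS hash key (40 bytes, the standard Toeplitz key length). */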
335 static const u8 hclge_hash_key[] = {
336         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
341 };
342
343 static const u32 hclge_dfx_bd_offset_list[] = {
344         HCLGE_DFX_BIOS_BD_OFFSET,
345         HCLGE_DFX_SSU_0_BD_OFFSET,
346         HCLGE_DFX_SSU_1_BD_OFFSET,
347         HCLGE_DFX_IGU_BD_OFFSET,
348         HCLGE_DFX_RPU_0_BD_OFFSET,
349         HCLGE_DFX_RPU_1_BD_OFFSET,
350         HCLGE_DFX_NCSI_BD_OFFSET,
351         HCLGE_DFX_RTC_BD_OFFSET,
352         HCLGE_DFX_PPP_BD_OFFSET,
353         HCLGE_DFX_RCB_BD_OFFSET,
354         HCLGE_DFX_TQP_BD_OFFSET,
355         HCLGE_DFX_SSU_2_BD_OFFSET
356 };
357
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359         HCLGE_OPC_DFX_BIOS_COMMON_REG,
360         HCLGE_OPC_DFX_SSU_REG_0,
361         HCLGE_OPC_DFX_SSU_REG_1,
362         HCLGE_OPC_DFX_IGU_EGU_REG,
363         HCLGE_OPC_DFX_RPU_REG_0,
364         HCLGE_OPC_DFX_RPU_REG_1,
365         HCLGE_OPC_DFX_NCSI_REG,
366         HCLGE_OPC_DFX_RTC_REG,
367         HCLGE_OPC_DFX_PPP_REG,
368         HCLGE_OPC_DFX_RCB_REG,
369         HCLGE_OPC_DFX_TQP_REG,
370         HCLGE_OPC_DFX_SSU_REG_2
371 };
372
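/* Flow-director key layout: each entry pairs a key field with its width in
 * bits (48 for a MAC address, 32 for an IPv4 address, 16 for an L4 port).
 */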
373 static const struct key_info meta_data_key_info[] = {
374         { PACKET_TYPE_ID, 6},
375         { IP_FRAGEMENT, 1},
376         { ROCE_TYPE, 1},
377         { NEXT_KEY, 5},
378         { VLAN_NUMBER, 2},
379         { SRC_VPORT, 12},
380         { DST_VPORT, 12},
381         { TUNNEL_PACKET, 1},
382 };
383
384 static const struct key_info tuple_key_info[] = {
385         { OUTER_DST_MAC, 48},
386         { OUTER_SRC_MAC, 48},
387         { OUTER_VLAN_TAG_FST, 16},
388         { OUTER_VLAN_TAG_SEC, 16},
389         { OUTER_ETH_TYPE, 16},
390         { OUTER_L2_RSV, 16},
391         { OUTER_IP_TOS, 8},
392         { OUTER_IP_PROTO, 8},
393         { OUTER_SRC_IP, 32},
394         { OUTER_DST_IP, 32},
395         { OUTER_L3_RSV, 16},
396         { OUTER_SRC_PORT, 16},
397         { OUTER_DST_PORT, 16},
398         { OUTER_L4_RSV, 32},
399         { OUTER_TUN_VNI, 24},
400         { OUTER_TUN_FLOW_ID, 8},
401         { INNER_DST_MAC, 48},
402         { INNER_SRC_MAC, 48},
403         { INNER_VLAN_TAG_FST, 16},
404         { INNER_VLAN_TAG_SEC, 16},
405         { INNER_ETH_TYPE, 16},
406         { INNER_L2_RSV, 16},
407         { INNER_IP_TOS, 8},
408         { INNER_IP_PROTO, 8},
409         { INNER_SRC_IP, 32},
410         { INNER_DST_IP, 32},
411         { INNER_L3_RSV, 16},
412         { INNER_SRC_PORT, 16},
413         { INNER_DST_PORT, 16},
414         { INNER_L4_RSV, 32},
415 };
416
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
418 {
419 #define HCLGE_MAC_CMD_NUM 21
420
421         u64 *data = (u64 *)(&hdev->mac_stats);
422         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
423         __le64 *desc_data;
424         int i, k, n;
425         int ret;
426
427         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
429         if (ret) {
430                 dev_err(&hdev->pdev->dev,
431                         "Get MAC pkt stats fail, status = %d.\n", ret);
432
433                 return ret;
434         }
435
436         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437                 /* for special opcode 0032, only the first desc has the head */
438                 if (unlikely(i == 0)) {
439                         desc_data = (__le64 *)(&desc[i].data[0]);
440                         n = HCLGE_RD_FIRST_STATS_NUM;
441                 } else {
442                         desc_data = (__le64 *)(&desc[i]);
443                         n = HCLGE_RD_OTHER_STATS_NUM;
444                 }
445
446                 for (k = 0; k < n; k++) {
447                         *data += le64_to_cpu(*desc_data);
448                         data++;
449                         desc_data++;
450                 }
451         }
452
453         return 0;
454 }
455
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
457 {
458         u64 *data = (u64 *)(&hdev->mac_stats);
459         struct hclge_desc *desc;
460         __le64 *desc_data;
461         u16 i, k, n;
462         int ret;
463
464         /* This may be called inside atomic sections,
465          * so GFP_ATOMIC is more suitable here
466          */
467         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
468         if (!desc)
469                 return -ENOMEM;
470
471         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
473         if (ret) {
474                 kfree(desc);
475                 return ret;
476         }
477
478         for (i = 0; i < desc_num; i++) {
479                 /* for special opcode 0034, only the first desc has the head */
480                 if (i == 0) {
481                         desc_data = (__le64 *)(&desc[i].data[0]);
482                         n = HCLGE_RD_FIRST_STATS_NUM;
483                 } else {
484                         desc_data = (__le64 *)(&desc[i]);
485                         n = HCLGE_RD_OTHER_STATS_NUM;
486                 }
487
488                 for (k = 0; k < n; k++) {
489                         *data += le64_to_cpu(*desc_data);
490                         data++;
491                         desc_data++;
492                 }
493         }
494
495         kfree(desc);
496
497         return 0;
498 }
499
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
501 {
502         struct hclge_desc desc;
503         __le32 *desc_data;
504         u32 reg_num;
505         int ret;
506
507         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
509         if (ret)
510                 return ret;
511
512         desc_data = (__le32 *)(&desc.data[0]);
513         reg_num = le32_to_cpu(*desc_data);
514
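        /* one descriptor carries the first 3 register values; each further
         * descriptor carries 4 more, so round the remainder up
         */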
515         *desc_num = 1 + ((reg_num - 3) >> 2) +
516                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
517
518         return 0;
519 }
520
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
522 {
523         u32 desc_num;
524         int ret;
525
526         ret = hclge_mac_query_reg_num(hdev, &desc_num);
527
528         /* The firmware supports the new statistics acquisition method */
529         if (!ret)
530                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531         else if (ret == -EOPNOTSUPP)
532                 ret = hclge_mac_update_stats_defective(hdev);
533         else
534                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
535
536         return ret;
537 }
538
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
540 {
541         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542         struct hclge_vport *vport = hclge_get_vport(handle);
543         struct hclge_dev *hdev = vport->back;
544         struct hnae3_queue *queue;
545         struct hclge_desc desc[1];
546         struct hclge_tqp *tqp;
547         int ret, i;
548
549         for (i = 0; i < kinfo->num_tqps; i++) {
550                 queue = handle->kinfo.tqp[i];
551                 tqp = container_of(queue, struct hclge_tqp, q);
552                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
553                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
554                                            true);
555
556                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
558                 if (ret) {
559                         dev_err(&hdev->pdev->dev,
560                                 "Query tqp stat fail, status = %d,queue = %d\n",
561                                 ret, i);
562                         return ret;
563                 }
564                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565                         le32_to_cpu(desc[0].data[1]);
566         }
567
568         for (i = 0; i < kinfo->num_tqps; i++) {
569                 queue = handle->kinfo.tqp[i];
570                 tqp = container_of(queue, struct hclge_tqp, q);
571                 /* command : HCLGE_OPC_QUERY_IGU_STAT */
572                 hclge_cmd_setup_basic_desc(&desc[0],
573                                            HCLGE_OPC_QUERY_TX_STATUS,
574                                            true);
575
576                 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
578                 if (ret) {
579                         dev_err(&hdev->pdev->dev,
580                                 "Query tqp stat fail, status = %d,queue = %d\n",
581                                 ret, i);
582                         return ret;
583                 }
584                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585                         le32_to_cpu(desc[0].data[1]);
586         }
587
588         return 0;
589 }
590
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
592 {
593         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594         struct hclge_tqp *tqp;
595         u64 *buff = data;
596         int i;
597
598         for (i = 0; i < kinfo->num_tqps; i++) {
599                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
601         }
602
603         for (i = 0; i < kinfo->num_tqps; i++) {
604                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
606         }
607
608         return buff;
609 }
610
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
612 {
613         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
614
615         /* each TQP has both a TX queue and an RX queue */
616         return kinfo->num_tqps * (2);
617 }
618
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
620 {
621         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
622         u8 *buff = data;
623         int i = 0;
624
625         for (i = 0; i < kinfo->num_tqps; i++) {
626                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627                         struct hclge_tqp, q);
628                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
629                          tqp->index);
630                 buff = buff + ETH_GSTRING_LEN;
631         }
632
633         for (i = 0; i < kinfo->num_tqps; i++) {
634                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635                         struct hclge_tqp, q);
636                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
637                          tqp->index);
638                 buff = buff + ETH_GSTRING_LEN;
639         }
640
641         return buff;
642 }
643
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645                                  const struct hclge_comm_stats_str strs[],
646                                  int size, u64 *data)
647 {
648         u64 *buf = data;
649         u32 i;
650
651         for (i = 0; i < size; i++)
652                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
653
654         return buf + size;
655 }
656
657 static u8 *hclge_comm_get_strings(u32 stringset,
658                                   const struct hclge_comm_stats_str strs[],
659                                   int size, u8 *data)
660 {
661         char *buff = (char *)data;
662         u32 i;
663
664         if (stringset != ETH_SS_STATS)
665                 return buff;
666
667         for (i = 0; i < size; i++) {
668                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669                 buff = buff + ETH_GSTRING_LEN;
670         }
671
672         return (u8 *)buff;
673 }
674
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
676 {
677         struct hnae3_handle *handle;
678         int status;
679
680         handle = &hdev->vport[0].nic;
681         if (handle->client) {
682                 status = hclge_tqps_update_stats(handle);
683                 if (status) {
684                         dev_err(&hdev->pdev->dev,
685                                 "Update TQPS stats fail, status = %d.\n",
686                                 status);
687                 }
688         }
689
690         status = hclge_mac_update_stats(hdev);
691         if (status)
692                 dev_err(&hdev->pdev->dev,
693                         "Update MAC stats fail, status = %d.\n", status);
694 }
695
696 static void hclge_update_stats(struct hnae3_handle *handle,
697                                struct net_device_stats *net_stats)
698 {
699         struct hclge_vport *vport = hclge_get_vport(handle);
700         struct hclge_dev *hdev = vport->back;
701         int status;
702
703         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
704                 return;
705
706         status = hclge_mac_update_stats(hdev);
707         if (status)
708                 dev_err(&hdev->pdev->dev,
709                         "Update MAC stats fail, status = %d.\n",
710                         status);
711
712         status = hclge_tqps_update_stats(handle);
713         if (status)
714                 dev_err(&hdev->pdev->dev,
715                         "Update TQPS stats fail, status = %d.\n",
716                         status);
717
718         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
719 }
720
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
722 {
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724                 HNAE3_SUPPORT_PHY_LOOPBACK |\
725                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
727
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int count = 0;
731
732         /* Loopback test support rules:
733          * mac: only supported in GE mode
734          * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
735          * phy: only supported when a PHY device exists on the board
736          */
737         if (stringset == ETH_SS_TEST) {
738                 /* clear loopback bit flags at first */
739                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740                 if (hdev->pdev->revision >= 0x21 ||
741                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
744                         count += 1;
745                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
746                 }
747
748                 count += 2;
749                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
751
752                 if (hdev->hw.mac.phydev) {
753                         count += 1;
754                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
755                 }
756
757         } else if (stringset == ETH_SS_STATS) {
758                 count = ARRAY_SIZE(g_mac_stats_string) +
759                         hclge_tqps_get_sset_count(handle, stringset);
760         }
761
762         return count;
763 }
764
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
766                               u8 *data)
767 {
768         u8 *p = (char *)data;
769         int size;
770
771         if (stringset == ETH_SS_STATS) {
772                 size = ARRAY_SIZE(g_mac_stats_string);
773                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
774                                            size, p);
775                 p = hclge_tqps_get_strings(handle, p);
776         } else if (stringset == ETH_SS_TEST) {
777                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
779                                ETH_GSTRING_LEN);
780                         p += ETH_GSTRING_LEN;
781                 }
782                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
784                                ETH_GSTRING_LEN);
785                         p += ETH_GSTRING_LEN;
786                 }
787                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
788                         memcpy(p,
789                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
790                                ETH_GSTRING_LEN);
791                         p += ETH_GSTRING_LEN;
792                 }
793                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
795                                ETH_GSTRING_LEN);
796                         p += ETH_GSTRING_LEN;
797                 }
798         }
799 }
800
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
802 {
803         struct hclge_vport *vport = hclge_get_vport(handle);
804         struct hclge_dev *hdev = vport->back;
805         u64 *p;
806
807         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808                                  ARRAY_SIZE(g_mac_stats_string), data);
809         p = hclge_tqps_get_stats(handle, p);
810 }
811
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813                                struct hns3_mac_stats *mac_stats)
814 {
815         struct hclge_vport *vport = hclge_get_vport(handle);
816         struct hclge_dev *hdev = vport->back;
817
818         hclge_update_stats(handle, NULL);
819
820         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
822 }
823
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825                                    struct hclge_func_status_cmd *status)
826 {
827         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
828                 return -EINVAL;
829
830         /* Record whether this PF is the main PF */
831         if (status->pf_state & HCLGE_PF_STATE_MAIN)
832                 hdev->flag |= HCLGE_FLAG_MAIN;
833         else
834                 hdev->flag &= ~HCLGE_FLAG_MAIN;
835
836         return 0;
837 }
838
839 static int hclge_query_function_status(struct hclge_dev *hdev)
840 {
841 #define HCLGE_QUERY_MAX_CNT     5
842
843         struct hclge_func_status_cmd *req;
844         struct hclge_desc desc;
845         int timeout = 0;
846         int ret;
847
848         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849         req = (struct hclge_func_status_cmd *)desc.data;
850
851         do {
852                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
853                 if (ret) {
854                         dev_err(&hdev->pdev->dev,
855                                 "query function status failed %d.\n", ret);
856                         return ret;
857                 }
858
859                 /* Check pf reset is done */
860                 if (req->pf_state)
861                         break;
862                 usleep_range(1000, 2000);
863         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
864
865         ret = hclge_parse_func_status(hdev, req);
866
867         return ret;
868 }
869
870 static int hclge_query_pf_resource(struct hclge_dev *hdev)
871 {
872         struct hclge_pf_res_cmd *req;
873         struct hclge_desc desc;
874         int ret;
875
876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
877         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
878         if (ret) {
879                 dev_err(&hdev->pdev->dev,
880                         "query pf resource failed %d.\n", ret);
881                 return ret;
882         }
883
884         req = (struct hclge_pf_res_cmd *)desc.data;
885         hdev->num_tqps = __le16_to_cpu(req->tqp_num);
886         hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
887
888         if (req->tx_buf_size)
889                 hdev->tx_buf_size =
890                         __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
891         else
892                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
893
894         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
895
896         if (req->dv_buf_size)
897                 hdev->dv_buf_size =
898                         __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
899         else
900                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
901
902         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
903
904         if (hnae3_dev_roce_supported(hdev)) {
905                 hdev->roce_base_msix_offset =
906                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
907                                 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
908                 hdev->num_roce_msi =
909                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
910                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
911
912                 /* the NIC's MSI-X vector count always equals the RoCE's. */
913                 hdev->num_nic_msi = hdev->num_roce_msi;
914
915                 /* The PF should have both NIC and RoCE vectors,
916                  * with NIC vectors coming before RoCE vectors.
917                  */
918                 hdev->num_msi = hdev->num_roce_msi +
919                                 hdev->roce_base_msix_offset;
920         } else {
921                 hdev->num_msi =
922                 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
923                                 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
924
925                 hdev->num_nic_msi = hdev->num_msi;
926         }
927
928         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
929                 dev_err(&hdev->pdev->dev,
930                         "Just %u msi resources, not enough for pf(min:2).\n",
931                         hdev->num_nic_msi);
932                 return -EINVAL;
933         }
934
935         return 0;
936 }
937
938 static int hclge_parse_speed(int speed_cmd, int *speed)
939 {
940         switch (speed_cmd) {
941         case 6:
942                 *speed = HCLGE_MAC_SPEED_10M;
943                 break;
944         case 7:
945                 *speed = HCLGE_MAC_SPEED_100M;
946                 break;
947         case 0:
948                 *speed = HCLGE_MAC_SPEED_1G;
949                 break;
950         case 1:
951                 *speed = HCLGE_MAC_SPEED_10G;
952                 break;
953         case 2:
954                 *speed = HCLGE_MAC_SPEED_25G;
955                 break;
956         case 3:
957                 *speed = HCLGE_MAC_SPEED_40G;
958                 break;
959         case 4:
960                 *speed = HCLGE_MAC_SPEED_50G;
961                 break;
962         case 5:
963                 *speed = HCLGE_MAC_SPEED_100G;
964                 break;
965         default:
966                 return -EINVAL;
967         }
968
969         return 0;
970 }
971
972 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
973 {
974         struct hclge_vport *vport = hclge_get_vport(handle);
975         struct hclge_dev *hdev = vport->back;
976         u32 speed_ability = hdev->hw.mac.speed_ability;
977         u32 speed_bit = 0;
978
979         switch (speed) {
980         case HCLGE_MAC_SPEED_10M:
981                 speed_bit = HCLGE_SUPPORT_10M_BIT;
982                 break;
983         case HCLGE_MAC_SPEED_100M:
984                 speed_bit = HCLGE_SUPPORT_100M_BIT;
985                 break;
986         case HCLGE_MAC_SPEED_1G:
987                 speed_bit = HCLGE_SUPPORT_1G_BIT;
988                 break;
989         case HCLGE_MAC_SPEED_10G:
990                 speed_bit = HCLGE_SUPPORT_10G_BIT;
991                 break;
992         case HCLGE_MAC_SPEED_25G:
993                 speed_bit = HCLGE_SUPPORT_25G_BIT;
994                 break;
995         case HCLGE_MAC_SPEED_40G:
996                 speed_bit = HCLGE_SUPPORT_40G_BIT;
997                 break;
998         case HCLGE_MAC_SPEED_50G:
999                 speed_bit = HCLGE_SUPPORT_50G_BIT;
1000                 break;
1001         case HCLGE_MAC_SPEED_100G:
1002                 speed_bit = HCLGE_SUPPORT_100G_BIT;
1003                 break;
1004         default:
1005                 return -EINVAL;
1006         }
1007
1008         if (speed_bit & speed_ability)
1009                 return 0;
1010
1011         return -EINVAL;
1012 }
1013
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1015 {
1016         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1018                                  mac->supported);
1019         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1021                                  mac->supported);
1022         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1024                                  mac->supported);
1025         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1027                                  mac->supported);
1028         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1030                                  mac->supported);
1031 }
1032
1033 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1034 {
1035         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1036                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1037                                  mac->supported);
1038         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1039                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1040                                  mac->supported);
1041         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1042                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1043                                  mac->supported);
1044         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1045                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1046                                  mac->supported);
1047         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1048                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1049                                  mac->supported);
1050 }
1051
1052 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1053 {
1054         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1059                                  mac->supported);
1060         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1061                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1062                                  mac->supported);
1063         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1064                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1065                                  mac->supported);
1066         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1067                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1068                                  mac->supported);
1069 }
1070
1071 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1072 {
1073         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1080                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1081                                  mac->supported);
1082         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1083                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1084                                  mac->supported);
1085         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1086                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1087                                  mac->supported);
1088         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1089                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1090                                  mac->supported);
1091 }
1092
1093 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1094 {
1095         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1096         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1097
1098         switch (mac->speed) {
1099         case HCLGE_MAC_SPEED_10G:
1100         case HCLGE_MAC_SPEED_40G:
1101                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1102                                  mac->supported);
1103                 mac->fec_ability =
1104                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1105                 break;
1106         case HCLGE_MAC_SPEED_25G:
1107         case HCLGE_MAC_SPEED_50G:
1108                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1109                                  mac->supported);
1110                 mac->fec_ability =
1111                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1112                         BIT(HNAE3_FEC_AUTO);
1113                 break;
1114         case HCLGE_MAC_SPEED_100G:
1115                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1116                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1117                 break;
1118         default:
1119                 mac->fec_ability = 0;
1120                 break;
1121         }
1122 }
1123
1124 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1125                                         u8 speed_ability)
1126 {
1127         struct hclge_mac *mac = &hdev->hw.mac;
1128
1129         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1130                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1131                                  mac->supported);
1132
1133         hclge_convert_setting_sr(mac, speed_ability);
1134         hclge_convert_setting_lr(mac, speed_ability);
1135         hclge_convert_setting_cr(mac, speed_ability);
1136         if (hdev->pdev->revision >= 0x21)
1137                 hclge_convert_setting_fec(mac);
1138
1139         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1140         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1141         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1142 }
1143
1144 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1145                                             u8 speed_ability)
1146 {
1147         struct hclge_mac *mac = &hdev->hw.mac;
1148
1149         hclge_convert_setting_kr(mac, speed_ability);
1150         if (hdev->pdev->revision >= 0x21)
1151                 hclge_convert_setting_fec(mac);
1152         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1153         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1154         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1155 }
1156
1157 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1158                                          u8 speed_ability)
1159 {
1160         unsigned long *supported = hdev->hw.mac.supported;
1161
1162         /* default to supporting all speeds for a GE port */
1163         if (!speed_ability)
1164                 speed_ability = HCLGE_SUPPORT_GE;
1165
1166         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1167                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1168                                  supported);
1169
1170         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1171                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1172                                  supported);
1173                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1174                                  supported);
1175         }
1176
1177         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1178                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1179                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1180         }
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1184         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1185         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1186 }
1187
1188 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1189 {
1190         u8 media_type = hdev->hw.mac.media_type;
1191
1192         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1193                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1194         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1195                 hclge_parse_copper_link_mode(hdev, speed_ability);
1196         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1197                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1198 }
1199
1200 static u32 hclge_get_max_speed(u8 speed_ability)
1201 {
1202         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1203                 return HCLGE_MAC_SPEED_100G;
1204
1205         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1206                 return HCLGE_MAC_SPEED_50G;
1207
1208         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1209                 return HCLGE_MAC_SPEED_40G;
1210
1211         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1212                 return HCLGE_MAC_SPEED_25G;
1213
1214         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1215                 return HCLGE_MAC_SPEED_10G;
1216
1217         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1218                 return HCLGE_MAC_SPEED_1G;
1219
1220         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1221                 return HCLGE_MAC_SPEED_100M;
1222
1223         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1224                 return HCLGE_MAC_SPEED_10M;
1225
1226         return HCLGE_MAC_SPEED_1G;
1227 }
1228
1229 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1230 {
1231         struct hclge_cfg_param_cmd *req;
1232         u64 mac_addr_tmp_high;
1233         u64 mac_addr_tmp;
1234         unsigned int i;
1235
1236         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1237
1238         /* get the configuration */
1239         cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1240                                               HCLGE_CFG_VMDQ_M,
1241                                               HCLGE_CFG_VMDQ_S);
1242         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1244         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1245                                             HCLGE_CFG_TQP_DESC_N_M,
1246                                             HCLGE_CFG_TQP_DESC_N_S);
1247
1248         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1249                                         HCLGE_CFG_PHY_ADDR_M,
1250                                         HCLGE_CFG_PHY_ADDR_S);
1251         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252                                           HCLGE_CFG_MEDIA_TP_M,
1253                                           HCLGE_CFG_MEDIA_TP_S);
1254         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255                                           HCLGE_CFG_RX_BUF_LEN_M,
1256                                           HCLGE_CFG_RX_BUF_LEN_S);
1257         /* get mac_address */
1258         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1259         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1260                                             HCLGE_CFG_MAC_ADDR_H_M,
1261                                             HCLGE_CFG_MAC_ADDR_H_S);
1262
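        /* param[2] holds the low 32 bits of the MAC address; the field from
         * param[3] supplies the upper bits, shifted above them (as <<31 then <<1)
         */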
1263         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1264
1265         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1266                                              HCLGE_CFG_DEFAULT_SPEED_M,
1267                                              HCLGE_CFG_DEFAULT_SPEED_S);
1268         cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269                                             HCLGE_CFG_RSS_SIZE_M,
1270                                             HCLGE_CFG_RSS_SIZE_S);
1271
1272         for (i = 0; i < ETH_ALEN; i++)
1273                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1274
1275         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1276         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1277
1278         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279                                              HCLGE_CFG_SPEED_ABILITY_M,
1280                                              HCLGE_CFG_SPEED_ABILITY_S);
1281         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1283                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1284         if (!cfg->umv_space)
1285                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1286 }
1287
1288 /* hclge_get_cfg: query the static parameters from flash
1289  * @hdev: pointer to struct hclge_dev
1290  * @hcfg: the config structure to be filled
1291  */
1292 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1293 {
1294         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1295         struct hclge_cfg_param_cmd *req;
1296         unsigned int i;
1297         int ret;
1298
1299         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1300                 u32 offset = 0;
1301
1302                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1303                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1304                                            true);
1305                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1306                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1307                 /* Len must be in units of 4 bytes when sent to hardware */
1308                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1309                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1310                 req->offset = cpu_to_le32(offset);
1311         }
1312
1313         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1314         if (ret) {
1315                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1316                 return ret;
1317         }
1318
1319         hclge_parse_cfg(hcfg, desc);
1320
1321         return 0;
1322 }
1323
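/* hclge_get_cap: query the function status and the PF resources from the
 * firmware; the first failing query is reported and returned to the caller.
 */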
1324 static int hclge_get_cap(struct hclge_dev *hdev)
1325 {
1326         int ret;
1327
1328         ret = hclge_query_function_status(hdev);
1329         if (ret) {
1330                 dev_err(&hdev->pdev->dev,
1331                         "query function status error %d.\n", ret);
1332                 return ret;
1333         }
1334
1335         /* get pf resource */
1336         ret = hclge_query_pf_resource(hdev);
1337         if (ret)
1338                 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1339
1340         return ret;
1341 }
1342
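/* When running in a kdump (crash capture) kernel, shrink the queue pair and
 * descriptor counts to a minimum so the capture kernel's limited memory is
 * not exhausted.
 */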
1343 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1344 {
1345 #define HCLGE_MIN_TX_DESC       64
1346 #define HCLGE_MIN_RX_DESC       64
1347
1348         if (!is_kdump_kernel())
1349                 return;
1350
1351         dev_info(&hdev->pdev->dev,
1352                  "Running kdump kernel. Using minimal resources\n");
1353
1354         /* the minimal number of queue pairs equals the number of vports */
1355         hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1356         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1357         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1358 }
1359
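/* hclge_configure: read the static configuration from the firmware and use
 * it to initialise the device fields (MAC address, buffer sizes, TC/DCB
 * limits, default speed and link modes), then apply the kdump limits and
 * pick the initial service task affinity from the PCI function number.
 */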
1360 static int hclge_configure(struct hclge_dev *hdev)
1361 {
1362         struct hclge_cfg cfg;
1363         unsigned int i;
1364         int ret;
1365
1366         ret = hclge_get_cfg(hdev, &cfg);
1367         if (ret) {
1368                 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1369                 return ret;
1370         }
1371
1372         hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1373         hdev->base_tqp_pid = 0;
1374         hdev->rss_size_max = cfg.rss_size_max;
1375         hdev->rx_buf_len = cfg.rx_buf_len;
1376         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1377         hdev->hw.mac.media_type = cfg.media_type;
1378         hdev->hw.mac.phy_addr = cfg.phy_addr;
1379         hdev->num_tx_desc = cfg.tqp_desc_num;
1380         hdev->num_rx_desc = cfg.tqp_desc_num;
1381         hdev->tm_info.num_pg = 1;
1382         hdev->tc_max = cfg.tc_num;
1383         hdev->tm_info.hw_pfc_map = 0;
1384         hdev->wanted_umv_size = cfg.umv_space;
1385
1386         if (hnae3_dev_fd_supported(hdev)) {
1387                 hdev->fd_en = true;
1388                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1389         }
1390
1391         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1392         if (ret) {
1393                 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1394                 return ret;
1395         }
1396
1397         hclge_parse_link_mode(hdev, cfg.speed_ability);
1398
1399         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1400
1401         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1402             (hdev->tc_max < 1)) {
1403                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1404                          hdev->tc_max);
1405                 hdev->tc_max = 1;
1406         }
1407
1408         /* Dev does not support DCB */
1409         if (!hnae3_dev_dcb_supported(hdev)) {
1410                 hdev->tc_max = 1;
1411                 hdev->pfc_max = 0;
1412         } else {
1413                 hdev->pfc_max = hdev->tc_max;
1414         }
1415
1416         hdev->tm_info.num_tc = 1;
1417
1418         /* Discontiguous TC numbering is currently not supported */
1419         for (i = 0; i < hdev->tm_info.num_tc; i++)
1420                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1421
1422         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1423
1424         hclge_init_kdump_kernel_config(hdev);
1425
1426         /* Set the initial affinity based on the PCI function number */
1427         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1428         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1429         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1430                         &hdev->affinity_mask);
1431
1432         return ret;
1433 }
1434
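/* Program the minimum and maximum TSO MSS values into hardware via the
 * generic TSO configuration command.
 */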
1435 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1436                             unsigned int tso_mss_max)
1437 {
1438         struct hclge_cfg_tso_status_cmd *req;
1439         struct hclge_desc desc;
1440         u16 tso_mss;
1441
1442         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1443
1444         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1445
1446         tso_mss = 0;
1447         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1448                         HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1449         req->tso_mss_min = cpu_to_le16(tso_mss);
1450
1451         tso_mss = 0;
1452         hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1453                         HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1454         req->tso_mss_max = cpu_to_le16(tso_mss);
1455
1456         return hclge_cmd_send(&hdev->hw, &desc, 1);
1457 }
1458
1459 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1460 {
1461         struct hclge_cfg_gro_status_cmd *req;
1462         struct hclge_desc desc;
1463         int ret;
1464
1465         if (!hnae3_dev_gro_supported(hdev))
1466                 return 0;
1467
1468         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1469         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1470
1471         req->gro_en = cpu_to_le16(en ? 1 : 0);
1472
1473         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1474         if (ret)
1475                 dev_err(&hdev->pdev->dev,
1476                         "GRO hardware config cmd failed, ret = %d\n", ret);
1477
1478         return ret;
1479 }
1480
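/* Allocate the bookkeeping array for all queue pairs owned by this PF and
 * pre-fill each entry with its register base, buffer size and descriptor
 * counts; the queue pairs are handed out to vports later by
 * hclge_assign_tqp().
 */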
1481 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1482 {
1483         struct hclge_tqp *tqp;
1484         int i;
1485
1486         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1487                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1488         if (!hdev->htqp)
1489                 return -ENOMEM;
1490
1491         tqp = hdev->htqp;
1492
1493         for (i = 0; i < hdev->num_tqps; i++) {
1494                 tqp->dev = &hdev->pdev->dev;
1495                 tqp->index = i;
1496
1497                 tqp->q.ae_algo = &ae_algo;
1498                 tqp->q.buf_size = hdev->rx_buf_len;
1499                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1500                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1501                 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1502                         i * HCLGE_TQP_REG_SIZE;
1503
1504                 tqp++;
1505         }
1506
1507         return 0;
1508 }
1509
1510 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1511                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1512 {
1513         struct hclge_tqp_map_cmd *req;
1514         struct hclge_desc desc;
1515         int ret;
1516
1517         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1518
1519         req = (struct hclge_tqp_map_cmd *)desc.data;
1520         req->tqp_id = cpu_to_le16(tqp_pid);
1521         req->tqp_vf = func_id;
1522         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1523         if (!is_pf)
1524                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1525         req->tqp_vid = cpu_to_le16(tqp_vid);
1526
1527         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1528         if (ret)
1529                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1530
1531         return ret;
1532 }
1533
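/* Hand out up to @num_tqps unused queue pairs to @vport and derive the
 * vport's rss_size, which is capped by rss_size_max and by the number of
 * NIC MSI vectors so every queue can keep its own interrupt.
 */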
1534 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1535 {
1536         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1537         struct hclge_dev *hdev = vport->back;
1538         int i, alloced;
1539
1540         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1541              alloced < num_tqps; i++) {
1542                 if (!hdev->htqp[i].alloced) {
1543                         hdev->htqp[i].q.handle = &vport->nic;
1544                         hdev->htqp[i].q.tqp_index = alloced;
1545                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1546                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1547                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1548                         hdev->htqp[i].alloced = true;
1549                         alloced++;
1550                 }
1551         }
1552         vport->alloc_tqps = alloced;
1553         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1554                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1555
1556         /* ensure a one-to-one mapping between IRQ and queue by default */
1557         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1558                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1559
1560         return 0;
1561 }
1562
1563 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1564                             u16 num_tx_desc, u16 num_rx_desc)
1565
1566 {
1567         struct hnae3_handle *nic = &vport->nic;
1568         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1569         struct hclge_dev *hdev = vport->back;
1570         int ret;
1571
1572         kinfo->num_tx_desc = num_tx_desc;
1573         kinfo->num_rx_desc = num_rx_desc;
1574
1575         kinfo->rx_buf_len = hdev->rx_buf_len;
1576
1577         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1578                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1579         if (!kinfo->tqp)
1580                 return -ENOMEM;
1581
1582         ret = hclge_assign_tqp(vport, num_tqps);
1583         if (ret)
1584                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1585
1586         return ret;
1587 }
1588
1589 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1590                                   struct hclge_vport *vport)
1591 {
1592         struct hnae3_handle *nic = &vport->nic;
1593         struct hnae3_knic_private_info *kinfo;
1594         u16 i;
1595
1596         kinfo = &nic->kinfo;
1597         for (i = 0; i < vport->alloc_tqps; i++) {
1598                 struct hclge_tqp *q =
1599                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1600                 bool is_pf;
1601                 int ret;
1602
1603                 is_pf = !(vport->vport_id);
1604                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1605                                              i, is_pf);
1606                 if (ret)
1607                         return ret;
1608         }
1609
1610         return 0;
1611 }
1612
1613 static int hclge_map_tqp(struct hclge_dev *hdev)
1614 {
1615         struct hclge_vport *vport = hdev->vport;
1616         u16 i, num_vport;
1617
1618         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1619         for (i = 0; i < num_vport; i++) {
1620                 int ret;
1621
1622                 ret = hclge_map_tqp_to_vport(hdev, vport);
1623                 if (ret)
1624                         return ret;
1625
1626                 vport++;
1627         }
1628
1629         return 0;
1630 }
1631
1632 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1633 {
1634         struct hnae3_handle *nic = &vport->nic;
1635         struct hclge_dev *hdev = vport->back;
1636         int ret;
1637
1638         nic->pdev = hdev->pdev;
1639         nic->ae_algo = &ae_algo;
1640         nic->numa_node_mask = hdev->numa_node_mask;
1641
1642         ret = hclge_knic_setup(vport, num_tqps,
1643                                hdev->num_tx_desc, hdev->num_rx_desc);
1644         if (ret)
1645                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1646
1647         return ret;
1648 }
1649
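/* Allocate one vport for the PF's main NIC plus one per VMDq vport and per
 * requested VF, split the queue pairs evenly between them (the main vport
 * takes the remainder) and initialise each vport's defaults.
 */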
1650 static int hclge_alloc_vport(struct hclge_dev *hdev)
1651 {
1652         struct pci_dev *pdev = hdev->pdev;
1653         struct hclge_vport *vport;
1654         u32 tqp_main_vport;
1655         u32 tqp_per_vport;
1656         int num_vport, i;
1657         int ret;
1658
1659         /* We need to alloc a vport for the main NIC of the PF */
1660         num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1661
1662         if (hdev->num_tqps < num_vport) {
1663                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1664                         hdev->num_tqps, num_vport);
1665                 return -EINVAL;
1666         }
1667
1668         /* Alloc the same number of TQPs for every vport */
1669         tqp_per_vport = hdev->num_tqps / num_vport;
1670         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1671
1672         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1673                              GFP_KERNEL);
1674         if (!vport)
1675                 return -ENOMEM;
1676
1677         hdev->vport = vport;
1678         hdev->num_alloc_vport = num_vport;
1679
1680         if (IS_ENABLED(CONFIG_PCI_IOV))
1681                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1682
1683         for (i = 0; i < num_vport; i++) {
1684                 vport->back = hdev;
1685                 vport->vport_id = i;
1686                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1687                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1688                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1689                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1690                 INIT_LIST_HEAD(&vport->vlan_list);
1691                 INIT_LIST_HEAD(&vport->uc_mac_list);
1692                 INIT_LIST_HEAD(&vport->mc_mac_list);
1693
1694                 if (i == 0)
1695                         ret = hclge_vport_setup(vport, tqp_main_vport);
1696                 else
1697                         ret = hclge_vport_setup(vport, tqp_per_vport);
1698                 if (ret) {
1699                         dev_err(&pdev->dev,
1700                                 "vport setup failed for vport %d, %d\n",
1701                                 i, ret);
1702                         return ret;
1703                 }
1704
1705                 vport++;
1706         }
1707
1708         return 0;
1709 }
1710
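/* Push the per-TC TX packet buffer sizes calculated in @buf_alloc to
 * hardware; the sizes are converted to 128 byte units and flagged with the
 * update-enable bit before being sent.
 */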
1711 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1712                                     struct hclge_pkt_buf_alloc *buf_alloc)
1713 {
1714 /* TX buffer size is in units of 128 bytes */
1715 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1716 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1717         struct hclge_tx_buff_alloc_cmd *req;
1718         struct hclge_desc desc;
1719         int ret;
1720         u8 i;
1721
1722         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1723
1724         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1725         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1727
1728                 req->tx_pkt_buff[i] =
1729                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1730                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1731         }
1732
1733         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1734         if (ret)
1735                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1736                         ret);
1737
1738         return ret;
1739 }
1740
1741 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1742                                  struct hclge_pkt_buf_alloc *buf_alloc)
1743 {
1744         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1745
1746         if (ret)
1747                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1748
1749         return ret;
1750 }
1751
1752 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1753 {
1754         unsigned int i;
1755         u32 cnt = 0;
1756
1757         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1758                 if (hdev->hw_tc_map & BIT(i))
1759                         cnt++;
1760         return cnt;
1761 }
1762
1763 /* Get the number of PFC-enabled TCs, which have a private buffer */
1764 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1765                                   struct hclge_pkt_buf_alloc *buf_alloc)
1766 {
1767         struct hclge_priv_buf *priv;
1768         unsigned int i;
1769         int cnt = 0;
1770
1771         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772                 priv = &buf_alloc->priv_buf[i];
1773                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1774                     priv->enable)
1775                         cnt++;
1776         }
1777
1778         return cnt;
1779 }
1780
1781 /* Get the number of PFC-disabled TCs, which have a private buffer */
1782 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1783                                      struct hclge_pkt_buf_alloc *buf_alloc)
1784 {
1785         struct hclge_priv_buf *priv;
1786         unsigned int i;
1787         int cnt = 0;
1788
1789         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1790                 priv = &buf_alloc->priv_buf[i];
1791                 if (hdev->hw_tc_map & BIT(i) &&
1792                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1793                     priv->enable)
1794                         cnt++;
1795         }
1796
1797         return cnt;
1798 }
1799
1800 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1801 {
1802         struct hclge_priv_buf *priv;
1803         u32 rx_priv = 0;
1804         int i;
1805
1806         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1807                 priv = &buf_alloc->priv_buf[i];
1808                 if (priv->enable)
1809                         rx_priv += priv->buf_size;
1810         }
1811         return rx_priv;
1812 }
1813
1814 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1815 {
1816         u32 i, total_tx_size = 0;
1817
1818         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1819                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1820
1821         return total_tx_size;
1822 }
1823
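/* Check whether @rx_all bytes can cover the private buffers in @buf_alloc
 * plus a minimal shared buffer. If so, record the shared buffer size and
 * its waterlines and per-TC thresholds and return true; otherwise return
 * false so the caller can shrink the private buffers and retry.
 */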
1824 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1825                                 struct hclge_pkt_buf_alloc *buf_alloc,
1826                                 u32 rx_all)
1827 {
1828         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1829         u32 tc_num = hclge_get_tc_num(hdev);
1830         u32 shared_buf, aligned_mps;
1831         u32 rx_priv;
1832         int i;
1833
1834         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1835
1836         if (hnae3_dev_dcb_supported(hdev))
1837                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1838                                         hdev->dv_buf_size;
1839         else
1840                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1841                                         + hdev->dv_buf_size;
1842
1843         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1844         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1845                              HCLGE_BUF_SIZE_UNIT);
1846
1847         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1848         if (rx_all < rx_priv + shared_std)
1849                 return false;
1850
1851         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1852         buf_alloc->s_buf.buf_size = shared_buf;
1853         if (hnae3_dev_dcb_supported(hdev)) {
1854                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1855                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1856                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1857                                   HCLGE_BUF_SIZE_UNIT);
1858         } else {
1859                 buf_alloc->s_buf.self.high = aligned_mps +
1860                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
1861                 buf_alloc->s_buf.self.low = aligned_mps;
1862         }
1863
1864         if (hnae3_dev_dcb_supported(hdev)) {
1865                 hi_thrd = shared_buf - hdev->dv_buf_size;
1866
1867                 if (tc_num <= NEED_RESERVE_TC_NUM)
1868                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1869                                         / BUF_MAX_PERCENT;
1870
1871                 if (tc_num)
1872                         hi_thrd = hi_thrd / tc_num;
1873
1874                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1875                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1876                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1877         } else {
1878                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1879                 lo_thrd = aligned_mps;
1880         }
1881
1882         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1884                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1885         }
1886
1887         return true;
1888 }
1889
1890 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1891                                 struct hclge_pkt_buf_alloc *buf_alloc)
1892 {
1893         u32 i, total_size;
1894
1895         total_size = hdev->pkt_buf_size;
1896
1897         /* alloc tx buffer for all enabled tc */
1898         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1900
1901                 if (hdev->hw_tc_map & BIT(i)) {
1902                         if (total_size < hdev->tx_buf_size)
1903                                 return -ENOMEM;
1904
1905                         priv->tx_buf_size = hdev->tx_buf_size;
1906                 } else {
1907                         priv->tx_buf_size = 0;
1908                 }
1909
1910                 total_size -= priv->tx_buf_size;
1911         }
1912
1913         return 0;
1914 }
1915
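/* Calculate private RX buffer sizes and waterlines for every enabled TC,
 * using larger (@max is true) or smaller (@max is false) waterlines, and
 * check whether the result still fits in the remaining packet buffer.
 */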
1916 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1917                                   struct hclge_pkt_buf_alloc *buf_alloc)
1918 {
1919         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1920         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1921         unsigned int i;
1922
1923         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1924                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1925
1926                 priv->enable = 0;
1927                 priv->wl.low = 0;
1928                 priv->wl.high = 0;
1929                 priv->buf_size = 0;
1930
1931                 if (!(hdev->hw_tc_map & BIT(i)))
1932                         continue;
1933
1934                 priv->enable = 1;
1935
1936                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1937                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1938                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
1939                                                 HCLGE_BUF_SIZE_UNIT);
1940                 } else {
1941                         priv->wl.low = 0;
1942                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1943                                         aligned_mps;
1944                 }
1945
1946                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1947         }
1948
1949         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1950 }
1951
1952 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1953                                           struct hclge_pkt_buf_alloc *buf_alloc)
1954 {
1955         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1956         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1957         int i;
1958
1959         /* clear the TCs starting from the last one */
1960         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1961                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1962                 unsigned int mask = BIT((unsigned int)i);
1963
1964                 if (hdev->hw_tc_map & mask &&
1965                     !(hdev->tm_info.hw_pfc_map & mask)) {
1966                         /* Clear the private buffer of the TCs without PFC */
1967                         priv->wl.low = 0;
1968                         priv->wl.high = 0;
1969                         priv->buf_size = 0;
1970                         priv->enable = 0;
1971                         no_pfc_priv_num--;
1972                 }
1973
1974                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1975                     no_pfc_priv_num == 0)
1976                         break;
1977         }
1978
1979         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1980 }
1981
1982 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1983                                         struct hclge_pkt_buf_alloc *buf_alloc)
1984 {
1985         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1986         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1987         int i;
1988
1989         /* clear the TCs starting from the last one */
1990         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1991                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1992                 unsigned int mask = BIT((unsigned int)i);
1993
1994                 if (hdev->hw_tc_map & mask &&
1995                     hdev->tm_info.hw_pfc_map & mask) {
1996                         /* Reduce the number of PFC TCs with a private buffer */
1997                         priv->wl.low = 0;
1998                         priv->enable = 0;
1999                         priv->wl.high = 0;
2000                         priv->buf_size = 0;
2001                         pfc_priv_num--;
2002                 }
2003
2004                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2005                     pfc_priv_num == 0)
2006                         break;
2007         }
2008
2009         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2010 }
2011
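/* Try to hand the whole remaining RX packet buffer to the enabled TCs as
 * private buffers, leaving no shared buffer. Returns false when the per-TC
 * share would fall below the required minimum.
 */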
2012 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2013                                       struct hclge_pkt_buf_alloc *buf_alloc)
2014 {
2015 #define COMPENSATE_BUFFER       0x3C00
2016 #define COMPENSATE_HALF_MPS_NUM 5
2017 #define PRIV_WL_GAP             0x1800
2018
2019         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2020         u32 tc_num = hclge_get_tc_num(hdev);
2021         u32 half_mps = hdev->mps >> 1;
2022         u32 min_rx_priv;
2023         unsigned int i;
2024
2025         if (tc_num)
2026                 rx_priv = rx_priv / tc_num;
2027
2028         if (tc_num <= NEED_RESERVE_TC_NUM)
2029                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2030
2031         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2032                         COMPENSATE_HALF_MPS_NUM * half_mps;
2033         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2034         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2035
2036         if (rx_priv < min_rx_priv)
2037                 return false;
2038
2039         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2041
2042                 priv->enable = 0;
2043                 priv->wl.low = 0;
2044                 priv->wl.high = 0;
2045                 priv->buf_size = 0;
2046
2047                 if (!(hdev->hw_tc_map & BIT(i)))
2048                         continue;
2049
2050                 priv->enable = 1;
2051                 priv->buf_size = rx_priv;
2052                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2053                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2054         }
2055
2056         buf_alloc->s_buf.buf_size = 0;
2057
2058         return true;
2059 }
2060
2061 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2062  * @hdev: pointer to struct hclge_dev
2063  * @buf_alloc: pointer to buffer calculation data
2064  * @return: 0: calculation successful, negative: fail
2065  */
2066 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2067                                 struct hclge_pkt_buf_alloc *buf_alloc)
2068 {
2069         /* When DCB is not supported, rx private buffer is not allocated. */
2070         if (!hnae3_dev_dcb_supported(hdev)) {
2071                 u32 rx_all = hdev->pkt_buf_size;
2072
2073                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2074                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2075                         return -ENOMEM;
2076
2077                 return 0;
2078         }
2079
2080         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2081                 return 0;
2082
2083         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2084                 return 0;
2085
2086         /* try to decrease the buffer size */
2087         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2088                 return 0;
2089
2090         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2091                 return 0;
2092
2093         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2094                 return 0;
2095
2096         return -ENOMEM;
2097 }
2098
2099 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2100                                    struct hclge_pkt_buf_alloc *buf_alloc)
2101 {
2102         struct hclge_rx_priv_buff_cmd *req;
2103         struct hclge_desc desc;
2104         int ret;
2105         int i;
2106
2107         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2108         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2109
2110         /* Alloc private buffers for the TCs */
2111         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2112                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2113
2114                 req->buf_num[i] =
2115                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2116                 req->buf_num[i] |=
2117                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2118         }
2119
2120         req->shared_buf =
2121                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2122                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2123
2124         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2125         if (ret)
2126                 dev_err(&hdev->pdev->dev,
2127                         "rx private buffer alloc cmd failed %d\n", ret);
2128
2129         return ret;
2130 }
2131
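/* Program the per-TC private buffer waterlines into hardware. The TCs are
 * split across two command descriptors, which are chained with the NEXT
 * flag and sent together.
 */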
2132 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2133                                    struct hclge_pkt_buf_alloc *buf_alloc)
2134 {
2135         struct hclge_rx_priv_wl_buf *req;
2136         struct hclge_priv_buf *priv;
2137         struct hclge_desc desc[2];
2138         int i, j;
2139         int ret;
2140
2141         for (i = 0; i < 2; i++) {
2142                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2143                                            false);
2144                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2145
2146                 /* The first descriptor sets the NEXT bit to 1 */
2147                 if (i == 0)
2148                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2149                 else
2150                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2151
2152                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2153                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2154
2155                         priv = &buf_alloc->priv_buf[idx];
2156                         req->tc_wl[j].high =
2157                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2158                         req->tc_wl[j].high |=
2159                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2160                         req->tc_wl[j].low =
2161                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2162                         req->tc_wl[j].low |=
2163                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2164                 }
2165         }
2166
2167         /* Send 2 descriptors at one time */
2168         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2169         if (ret)
2170                 dev_err(&hdev->pdev->dev,
2171                         "rx private waterline config cmd failed %d\n",
2172                         ret);
2173         return ret;
2174 }
2175
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177                                     struct hclge_pkt_buf_alloc *buf_alloc)
2178 {
2179         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180         struct hclge_rx_com_thrd *req;
2181         struct hclge_desc desc[2];
2182         struct hclge_tc_thrd *tc;
2183         int i, j;
2184         int ret;
2185
2186         for (i = 0; i < 2; i++) {
2187                 hclge_cmd_setup_basic_desc(&desc[i],
2188                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2190
2191                 /* The first descriptor sets the NEXT bit to 1 */
2192                 if (i == 0)
2193                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2194                 else
2195                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2196
2197                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2199
2200                         req->com_thrd[j].high =
2201                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202                         req->com_thrd[j].high |=
2203                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204                         req->com_thrd[j].low =
2205                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206                         req->com_thrd[j].low |=
2207                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2208                 }
2209         }
2210
2211         /* Send 2 descriptors at one time */
2212         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2213         if (ret)
2214                 dev_err(&hdev->pdev->dev,
2215                         "common threshold config cmd failed %d\n", ret);
2216         return ret;
2217 }
2218
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220                                   struct hclge_pkt_buf_alloc *buf_alloc)
2221 {
2222         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223         struct hclge_rx_com_wl *req;
2224         struct hclge_desc desc;
2225         int ret;
2226
2227         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2228
2229         req = (struct hclge_rx_com_wl *)desc.data;
2230         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2232
2233         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2235
2236         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2237         if (ret)
2238                 dev_err(&hdev->pdev->dev,
2239                         "common waterline config cmd failed %d\n", ret);
2240
2241         return ret;
2242 }
2243
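/* hclge_buffer_alloc: calculate and program the packet buffer layout: TX
 * buffers first, then the RX private buffers; the private waterlines and
 * common thresholds are only programmed when the device supports DCB, and
 * the common waterline is configured last.
 */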
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2245 {
2246         struct hclge_pkt_buf_alloc *pkt_buf;
2247         int ret;
2248
2249         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2250         if (!pkt_buf)
2251                 return -ENOMEM;
2252
2253         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2254         if (ret) {
2255                 dev_err(&hdev->pdev->dev,
2256                         "could not calc tx buffer size for all TCs %d\n", ret);
2257                 goto out;
2258         }
2259
2260         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2261         if (ret) {
2262                 dev_err(&hdev->pdev->dev,
2263                         "could not alloc tx buffers %d\n", ret);
2264                 goto out;
2265         }
2266
2267         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2268         if (ret) {
2269                 dev_err(&hdev->pdev->dev,
2270                         "could not calc rx priv buffer size for all TCs %d\n",
2271                         ret);
2272                 goto out;
2273         }
2274
2275         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2276         if (ret) {
2277                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2278                         ret);
2279                 goto out;
2280         }
2281
2282         if (hnae3_dev_dcb_supported(hdev)) {
2283                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2284                 if (ret) {
2285                         dev_err(&hdev->pdev->dev,
2286                                 "could not configure rx private waterline %d\n",
2287                                 ret);
2288                         goto out;
2289                 }
2290
2291                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2292                 if (ret) {
2293                         dev_err(&hdev->pdev->dev,
2294                                 "could not configure common threshold %d\n",
2295                                 ret);
2296                         goto out;
2297                 }
2298         }
2299
2300         ret = hclge_common_wl_config(hdev, pkt_buf);
2301         if (ret)
2302                 dev_err(&hdev->pdev->dev,
2303                         "could not configure common waterline %d\n", ret);
2304
2305 out:
2306         kfree(pkt_buf);
2307         return ret;
2308 }
2309
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2311 {
2312         struct hnae3_handle *roce = &vport->roce;
2313         struct hnae3_handle *nic = &vport->nic;
2314
2315         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2316
2317         if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318             vport->back->num_msi_left == 0)
2319                 return -EINVAL;
2320
2321         roce->rinfo.base_vector = vport->back->roce_base_vector;
2322
2323         roce->rinfo.netdev = nic->kinfo.netdev;
2324         roce->rinfo.roce_io_base = vport->back->hw.io_base;
2325
2326         roce->pdev = nic->pdev;
2327         roce->ae_algo = nic->ae_algo;
2328         roce->numa_node_mask = nic->numa_node_mask;
2329
2330         return 0;
2331 }
2332
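/* Allocate MSI/MSI-X interrupt vectors for the device, accepting fewer
 * vectors than requested when necessary, and set up the arrays that map
 * vectors to vports and IRQ numbers.
 */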
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2334 {
2335         struct pci_dev *pdev = hdev->pdev;
2336         int vectors;
2337         int i;
2338
2339         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2340                                         hdev->num_msi,
2341                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2342         if (vectors < 0) {
2343                 dev_err(&pdev->dev,
2344                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2345                         vectors);
2346                 return vectors;
2347         }
2348         if (vectors < hdev->num_msi)
2349                 dev_warn(&hdev->pdev->dev,
2350                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351                          hdev->num_msi, vectors);
2352
2353         hdev->num_msi = vectors;
2354         hdev->num_msi_left = vectors;
2355
2356         hdev->base_msi_vector = pdev->irq;
2357         hdev->roce_base_vector = hdev->base_msi_vector +
2358                                 hdev->roce_base_msix_offset;
2359
2360         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361                                            sizeof(u16), GFP_KERNEL);
2362         if (!hdev->vector_status) {
2363                 pci_free_irq_vectors(pdev);
2364                 return -ENOMEM;
2365         }
2366
2367         for (i = 0; i < hdev->num_msi; i++)
2368                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2369
2370         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371                                         sizeof(int), GFP_KERNEL);
2372         if (!hdev->vector_irq) {
2373                 pci_free_irq_vectors(pdev);
2374                 return -ENOMEM;
2375         }
2376
2377         return 0;
2378 }
2379
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2381 {
2382         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383                 duplex = HCLGE_MAC_FULL;
2384
2385         return duplex;
2386 }
2387
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2389                                       u8 duplex)
2390 {
2391         struct hclge_config_mac_speed_dup_cmd *req;
2392         struct hclge_desc desc;
2393         int ret;
2394
2395         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2396
2397         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2398
2399         if (duplex)
2400                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2401
2402         switch (speed) {
2403         case HCLGE_MAC_SPEED_10M:
2404                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405                                 HCLGE_CFG_SPEED_S, 6);
2406                 break;
2407         case HCLGE_MAC_SPEED_100M:
2408                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409                                 HCLGE_CFG_SPEED_S, 7);
2410                 break;
2411         case HCLGE_MAC_SPEED_1G:
2412                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413                                 HCLGE_CFG_SPEED_S, 0);
2414                 break;
2415         case HCLGE_MAC_SPEED_10G:
2416                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417                                 HCLGE_CFG_SPEED_S, 1);
2418                 break;
2419         case HCLGE_MAC_SPEED_25G:
2420                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421                                 HCLGE_CFG_SPEED_S, 2);
2422                 break;
2423         case HCLGE_MAC_SPEED_40G:
2424                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425                                 HCLGE_CFG_SPEED_S, 3);
2426                 break;
2427         case HCLGE_MAC_SPEED_50G:
2428                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429                                 HCLGE_CFG_SPEED_S, 4);
2430                 break;
2431         case HCLGE_MAC_SPEED_100G:
2432                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433                                 HCLGE_CFG_SPEED_S, 5);
2434                 break;
2435         default:
2436                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2437                 return -EINVAL;
2438         }
2439
2440         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2441                       1);
2442
2443         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2444         if (ret) {
2445                 dev_err(&hdev->pdev->dev,
2446                         "mac speed/duplex config cmd failed %d.\n", ret);
2447                 return ret;
2448         }
2449
2450         return 0;
2451 }
2452
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2454 {
2455         int ret;
2456
2457         duplex = hclge_check_speed_dup(duplex, speed);
2458         if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2459                 return 0;
2460
2461         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2462         if (ret)
2463                 return ret;
2464
2465         hdev->hw.mac.speed = speed;
2466         hdev->hw.mac.duplex = duplex;
2467
2468         return 0;
2469 }
2470
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2472                                      u8 duplex)
2473 {
2474         struct hclge_vport *vport = hclge_get_vport(handle);
2475         struct hclge_dev *hdev = vport->back;
2476
2477         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2478 }
2479
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2481 {
2482         struct hclge_config_auto_neg_cmd *req;
2483         struct hclge_desc desc;
2484         u32 flag = 0;
2485         int ret;
2486
2487         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2488
2489         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2490         if (enable)
2491                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2493
2494         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2495         if (ret)
2496                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2497                         ret);
2498
2499         return ret;
2500 }
2501
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2503 {
2504         struct hclge_vport *vport = hclge_get_vport(handle);
2505         struct hclge_dev *hdev = vport->back;
2506
2507         if (!hdev->hw.mac.support_autoneg) {
2508                 if (enable) {
2509                         dev_err(&hdev->pdev->dev,
2510                                 "autoneg is not supported by current port\n");
2511                         return -EOPNOTSUPP;
2512                 } else {
2513                         return 0;
2514                 }
2515         }
2516
2517         return hclge_set_autoneg_en(hdev, enable);
2518 }
2519
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2521 {
2522         struct hclge_vport *vport = hclge_get_vport(handle);
2523         struct hclge_dev *hdev = vport->back;
2524         struct phy_device *phydev = hdev->hw.mac.phydev;
2525
2526         if (phydev)
2527                 return phydev->autoneg;
2528
2529         return hdev->hw.mac.autoneg;
2530 }
2531
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2533 {
2534         struct hclge_vport *vport = hclge_get_vport(handle);
2535         struct hclge_dev *hdev = vport->back;
2536         int ret;
2537
2538         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2539
2540         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2541         if (ret)
2542                 return ret;
2543         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2544 }
2545
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2547 {
2548         struct hclge_vport *vport = hclge_get_vport(handle);
2549         struct hclge_dev *hdev = vport->back;
2550
2551         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552                 return hclge_set_autoneg_en(hdev, !halt);
2553
2554         return 0;
2555 }
2556
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2558 {
2559         struct hclge_config_fec_cmd *req;
2560         struct hclge_desc desc;
2561         int ret;
2562
2563         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2564
2565         req = (struct hclge_config_fec_cmd *)desc.data;
2566         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568         if (fec_mode & BIT(HNAE3_FEC_RS))
2569                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571         if (fec_mode & BIT(HNAE3_FEC_BASER))
2572                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2574
2575         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2576         if (ret)
2577                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2578
2579         return ret;
2580 }
2581
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2583 {
2584         struct hclge_vport *vport = hclge_get_vport(handle);
2585         struct hclge_dev *hdev = vport->back;
2586         struct hclge_mac *mac = &hdev->hw.mac;
2587         int ret;
2588
2589         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2591                 return -EINVAL;
2592         }
2593
2594         ret = hclge_set_fec_hw(hdev, fec_mode);
2595         if (ret)
2596                 return ret;
2597
2598         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2599         return 0;
2600 }
2601
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2603                           u8 *fec_mode)
2604 {
2605         struct hclge_vport *vport = hclge_get_vport(handle);
2606         struct hclge_dev *hdev = vport->back;
2607         struct hclge_mac *mac = &hdev->hw.mac;
2608
2609         if (fec_ability)
2610                 *fec_ability = mac->fec_ability;
2611         if (fec_mode)
2612                 *fec_mode = mac->fec_mode;
2613 }
2614
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2616 {
2617         struct hclge_mac *mac = &hdev->hw.mac;
2618         int ret;
2619
2620         hdev->support_sfp_query = true;
2621         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623                                          hdev->hw.mac.duplex);
2624         if (ret) {
2625                 dev_err(&hdev->pdev->dev,
2626                         "Config mac speed dup fail ret=%d\n", ret);
2627                 return ret;
2628         }
2629
2630         if (hdev->hw.mac.support_autoneg) {
2631                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2632                 if (ret) {
2633                         dev_err(&hdev->pdev->dev,
2634                                 "Config mac autoneg fail ret=%d\n", ret);
2635                         return ret;
2636                 }
2637         }
2638
2639         mac->link = 0;
2640
2641         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2643                 if (ret) {
2644                         dev_err(&hdev->pdev->dev,
2645                                 "Fec mode init fail, ret = %d\n", ret);
2646                         return ret;
2647                 }
2648         }
2649
2650         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2651         if (ret) {
2652                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2653                 return ret;
2654         }
2655
2656         ret = hclge_set_default_loopback(hdev);
2657         if (ret)
2658                 return ret;
2659
2660         ret = hclge_buffer_alloc(hdev);
2661         if (ret)
2662                 dev_err(&hdev->pdev->dev,
2663                         "allocate buffer fail, ret=%d\n", ret);
2664
2665         return ret;
2666 }
2667
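/* The three helpers below queue the device's service task on hclge_wq,
 * pinned to the first CPU in the device's affinity mask. All of them are
 * skipped while the device is being removed; the mailbox and reset
 * variants also set their pending state bits so the service task knows
 * which work to process.
 */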
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2669 {
2670         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673                                     hclge_wq, &hdev->service_task, 0);
2674 }
2675
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2677 {
2678         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681                                     hclge_wq, &hdev->service_task, 0);
2682 }
2683
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2685 {
2686         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state))
2687                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2688                                     hclge_wq, &hdev->service_task,
2689                                     delay_time);
2690 }
2691
2692 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2693 {
2694         struct hclge_link_status_cmd *req;
2695         struct hclge_desc desc;
2696         int link_status;
2697         int ret;
2698
2699         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2700         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2701         if (ret) {
2702                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2703                         ret);
2704                 return ret;
2705         }
2706
2707         req = (struct hclge_link_status_cmd *)desc.data;
2708         link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2709
2710         return !!link_status;
2711 }
2712
2713 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2714 {
2715         unsigned int mac_state;
2716         int link_stat;
2717
2718         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2719                 return 0;
2720
2721         mac_state = hclge_get_mac_link_status(hdev);
2722
2723         if (hdev->hw.mac.phydev) {
2724                 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2725                         link_stat = mac_state &
2726                                 hdev->hw.mac.phydev->link;
2727                 else
2728                         link_stat = 0;
2729
2730         } else {
2731                 link_stat = mac_state;
2732         }
2733
2734         return !!link_stat;
2735 }
2736
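/* Poll the current MAC/PHY link state and, if it differs from the cached
 * state, notify the NIC client (and the RoCE client when present) for the
 * PF and each VMDq vport, reconfigure the MAC tunnel interrupt and cache
 * the new state. The LINK_UPDATING bit prevents concurrent updates.
 */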
2737 static void hclge_update_link_status(struct hclge_dev *hdev)
2738 {
2739         struct hnae3_client *rclient = hdev->roce_client;
2740         struct hnae3_client *client = hdev->nic_client;
2741         struct hnae3_handle *rhandle;
2742         struct hnae3_handle *handle;
2743         int state;
2744         int i;
2745
2746         if (!client)
2747                 return;
2748
2749         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2750                 return;
2751
2752         state = hclge_get_mac_phy_link(hdev);
2753         if (state != hdev->hw.mac.link) {
2754                 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2755                         handle = &hdev->vport[i].nic;
2756                         client->ops->link_status_change(handle, state);
2757                         hclge_config_mac_tnl_int(hdev, state);
2758                         rhandle = &hdev->vport[i].roce;
2759                         if (rclient && rclient->ops->link_status_change)
2760                                 rclient->ops->link_status_change(rhandle,
2761                                                                  state);
2762                 }
2763                 hdev->hw.mac.link = state;
2764         }
2765
2766         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2767 }
2768
2769 static void hclge_update_port_capability(struct hclge_mac *mac)
2770 {
2771         /* update fec ability by speed */
2772         hclge_convert_setting_fec(mac);
2773
2774         /* firmware cannot identify the backplane type; the media type
2775          * read from the configuration helps to deal with it
2776          */
2777         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2778             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2779                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2780         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2781                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2782
2783         if (mac->support_autoneg) {
2784                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2785                 linkmode_copy(mac->advertising, mac->supported);
2786         } else {
2787                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2788                                    mac->supported);
2789                 linkmode_zero(mac->advertising);
2790         }
2791 }
2792
2793 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2794 {
2795         struct hclge_sfp_info_cmd *resp;
2796         struct hclge_desc desc;
2797         int ret;
2798
2799         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2800         resp = (struct hclge_sfp_info_cmd *)desc.data;
2801         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2802         if (ret == -EOPNOTSUPP) {
2803                 dev_warn(&hdev->pdev->dev,
2804                          "IMP does not support get SFP speed %d\n", ret);
2805                 return ret;
2806         } else if (ret) {
2807                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2808                 return ret;
2809         }
2810
2811         *speed = le32_to_cpu(resp->speed);
2812
2813         return 0;
2814 }
2815
2816 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2817 {
2818         struct hclge_sfp_info_cmd *resp;
2819         struct hclge_desc desc;
2820         int ret;
2821
2822         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2823         resp = (struct hclge_sfp_info_cmd *)desc.data;
2824
2825         resp->query_type = QUERY_ACTIVE_SPEED;
2826
2827         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2828         if (ret == -EOPNOTSUPP) {
2829                 dev_warn(&hdev->pdev->dev,
2830                          "IMP does not support get SFP info %d\n", ret);
2831                 return ret;
2832         } else if (ret) {
2833                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2834                 return ret;
2835         }
2836
2837         mac->speed = le32_to_cpu(resp->speed);
2838         /* if resp->speed_ability is 0, it means it is an old version of
2839          * firmware, so do not update these params
2840          */
2841         if (resp->speed_ability) {
2842                 mac->module_type = le32_to_cpu(resp->module_type);
2843                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2844                 mac->autoneg = resp->autoneg;
2845                 mac->support_autoneg = resp->autoneg_ability;
2846                 mac->speed_type = QUERY_ACTIVE_SPEED;
2847                 if (!resp->active_fec)
2848                         mac->fec_mode = 0;
2849                 else
2850                         mac->fec_mode = BIT(resp->active_fec);
2851         } else {
2852                 mac->speed_type = QUERY_SFP_SPEED;
2853         }
2854
2855         return 0;
2856 }
2857
2858 static int hclge_update_port_info(struct hclge_dev *hdev)
2859 {
2860         struct hclge_mac *mac = &hdev->hw.mac;
2861         int speed = HCLGE_MAC_SPEED_UNKNOWN;
2862         int ret;
2863
2864         /* get the port info from SFP cmd if not copper port */
2865         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2866                 return 0;
2867
2868         /* if IMP does not support get SFP/qSFP info, return directly */
2869         if (!hdev->support_sfp_query)
2870                 return 0;
2871
2872         if (hdev->pdev->revision >= 0x21)
2873                 ret = hclge_get_sfp_info(hdev, mac);
2874         else
2875                 ret = hclge_get_sfp_speed(hdev, &speed);
2876
2877         if (ret == -EOPNOTSUPP) {
2878                 hdev->support_sfp_query = false;
2879                 return ret;
2880         } else if (ret) {
2881                 return ret;
2882         }
2883
2884         if (hdev->pdev->revision >= 0x21) {
2885                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2886                         hclge_update_port_capability(mac);
2887                         return 0;
2888                 }
2889                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2890                                                HCLGE_MAC_FULL);
2891         } else {
2892                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2893                         return 0; /* do nothing if no SFP */
2894
2895                 /* must config full duplex for SFP */
2896                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2897         }
2898 }
2899
2900 static int hclge_get_status(struct hnae3_handle *handle)
2901 {
2902         struct hclge_vport *vport = hclge_get_vport(handle);
2903         struct hclge_dev *hdev = vport->back;
2904
2905         hclge_update_link_status(hdev);
2906
2907         return hdev->hw.mac.link;
2908 }
2909
2910 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2911 {
2912         if (pci_num_vf(hdev->pdev) == 0) {
2913                 dev_err(&hdev->pdev->dev,
2914                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2915                 return NULL;
2916         }
2917
2918         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2919                 dev_err(&hdev->pdev->dev,
2920                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
2921                         vf, pci_num_vf(hdev->pdev));
2922                 return NULL;
2923         }
2924
2925         /* VFs start from 1 in vport */
2926         vf += HCLGE_VF_VPORT_START_NUM;
2927         return &hdev->vport[vf];
2928 }
2929
2930 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2931                                struct ifla_vf_info *ivf)
2932 {
2933         struct hclge_vport *vport = hclge_get_vport(handle);
2934         struct hclge_dev *hdev = vport->back;
2935
2936         vport = hclge_get_vf_vport(hdev, vf);
2937         if (!vport)
2938                 return -EINVAL;
2939
2940         ivf->vf = vf;
2941         ivf->linkstate = vport->vf_info.link_state;
2942         ivf->spoofchk = vport->vf_info.spoofchk;
2943         ivf->trusted = vport->vf_info.trusted;
2944         ivf->min_tx_rate = 0;
2945         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2946         ether_addr_copy(ivf->mac, vport->vf_info.mac);
2947
2948         return 0;
2949 }
2950
2951 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2952                                    int link_state)
2953 {
2954         struct hclge_vport *vport = hclge_get_vport(handle);
2955         struct hclge_dev *hdev = vport->back;
2956
2957         vport = hclge_get_vf_vport(hdev, vf);
2958         if (!vport)
2959                 return -EINVAL;
2960
2961         vport->vf_info.link_state = link_state;
2962
2963         return 0;
2964 }
2965
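     /* Decode the vector0 interrupt source. Events are checked in priority
      * order: IMP/global reset first, then MSI-X (hardware error) events,
      * then mailbox (CMDQ RX); only the highest priority pending event is
      * returned per call, the others are handled by a later interrupt.
      */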
2966 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2967 {
2968         u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2969
2970         /* fetch the events from their corresponding regs */
2971         rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2972         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2973         msix_src_reg = hclge_read_dev(&hdev->hw,
2974                                       HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2975
2976         /* Assumption: if by any chance reset and mailbox events are reported
2977          * together then we will only process the reset event in this go and
2978          * defer the processing of the mailbox events. Since we would not have
2979          * cleared the RX CMDQ event this time, we would receive another
2980          * interrupt from H/W just for the mailbox.
2981          *
2982          * Check for vector0 reset event sources.
2983          */
2984         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2985                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2986                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2987                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2988                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2989                 hdev->rst_stats.imp_rst_cnt++;
2990                 return HCLGE_VECTOR0_EVENT_RST;
2991         }
2992
2993         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2994                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2995                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2996                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2997                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2998                 hdev->rst_stats.global_rst_cnt++;
2999                 return HCLGE_VECTOR0_EVENT_RST;
3000         }
3001
3002         /* check for vector0 msix event source */
3003         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3004                 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
3005                          msix_src_reg);
3006                 *clearval = msix_src_reg;
3007                 return HCLGE_VECTOR0_EVENT_ERR;
3008         }
3009
3010         /* check for vector0 mailbox(=CMDQ RX) event source */
3011         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3012                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3013                 *clearval = cmdq_src_reg;
3014                 return HCLGE_VECTOR0_EVENT_MBX;
3015         }
3016
3017         /* print other vector0 event source */
3018         dev_info(&hdev->pdev->dev,
3019                  "CMDQ INT status:0x%x, other INT status:0x%x\n",
3020                  cmdq_src_reg, msix_src_reg);
3021         *clearval = msix_src_reg;
3022
3023         return HCLGE_VECTOR0_EVENT_OTHER;
3024 }
3025
3026 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3027                                     u32 regclr)
3028 {
3029         switch (event_type) {
3030         case HCLGE_VECTOR0_EVENT_RST:
3031                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3032                 break;
3033         case HCLGE_VECTOR0_EVENT_MBX:
3034                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3035                 break;
3036         default:
3037                 break;
3038         }
3039 }
3040
3041 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3042 {
3043         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3044                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3045                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3046                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3047         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3048 }
3049
3050 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3051 {
3052         writel(enable ? 1 : 0, vector->addr);
3053 }
3054
3055 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3056 {
3057         struct hclge_dev *hdev = data;
3058         u32 clearval = 0;
3059         u32 event_cause;
3060
3061         hclge_enable_vector(&hdev->misc_vector, false);
3062         event_cause = hclge_check_event_cause(hdev, &clearval);
3063
3064         /* vector 0 interrupt is shared with reset and mailbox source events. */
3065         switch (event_cause) {
3066         case HCLGE_VECTOR0_EVENT_ERR:
3067                 /* we do not know what type of reset is required now. This could
3068                  * only be decided after we fetch the type of errors which
3069                  * caused this event. Therefore, we will do the following for now:
3070                  * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3071                  *    have deferred the type of reset to be used.
3072                  * 2. Schedule the reset service task.
3073                  * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3074                  *    will fetch the correct type of reset. This would be done
3075                  *    by first decoding the types of errors.
3076                  */
3077                 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3078                 /* fall through */
3079         case HCLGE_VECTOR0_EVENT_RST:
3080                 hclge_reset_task_schedule(hdev);
3081                 break;
3082         case HCLGE_VECTOR0_EVENT_MBX:
3083                 /* If we are here then,
3084                  * 1. either we are not handling any mbx task and none is
3085                  *    scheduled,
3086                  *                        OR
3087                  * 2. we could be handling an mbx task but nothing more is
3088                  *    scheduled.
3089                  * In both cases, we should schedule the mbx task as there are
3090                  * more mbx messages reported by this interrupt.
3091                  */
3092                 hclge_mbx_task_schedule(hdev);
3093                 break;
3094         default:
3095                 dev_warn(&hdev->pdev->dev,
3096                          "received unknown or unhandled event of vector0\n");
3097                 break;
3098         }
3099
3100         hclge_clear_event_cause(hdev, event_cause, clearval);
3101
3102         /* Enable the interrupt if it was not caused by reset. When
3103          * clearval equals 0, it means the interrupt status may have been
3104          * cleared by hardware before the driver read the status register.
3105          * For this case, the vector0 interrupt should also be enabled.
3106          */
3107         if (!clearval ||
3108             event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3109                 hclge_enable_vector(&hdev->misc_vector, true);
3110         }
3111
3112         return IRQ_HANDLED;
3113 }
3114
3115 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3116 {
3117         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3118                 dev_warn(&hdev->pdev->dev,
3119                          "vector(vector_id %d) has been freed.\n", vector_id);
3120                 return;
3121         }
3122
3123         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3124         hdev->num_msi_left += 1;
3125         hdev->num_msi_used -= 1;
3126 }
3127
3128 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3129 {
3130         struct hclge_misc_vector *vector = &hdev->misc_vector;
3131
3132         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3133
3134         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3135         hdev->vector_status[0] = 0;
3136
3137         hdev->num_msi_left -= 1;
3138         hdev->num_msi_used += 1;
3139 }
3140
3141 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3142                                       const cpumask_t *mask)
3143 {
3144         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3145                                               affinity_notify);
3146
3147         cpumask_copy(&hdev->affinity_mask, mask);
3148 }
3149
3150 static void hclge_irq_affinity_release(struct kref *ref)
3151 {
3152 }
3153
3154 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3155 {
3156         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3157                               &hdev->affinity_mask);
3158
3159         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3160         hdev->affinity_notify.release = hclge_irq_affinity_release;
3161         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3162                                   &hdev->affinity_notify);
3163 }
3164
3165 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3166 {
3167         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3168         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3169 }
3170
3171 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3172 {
3173         int ret;
3174
3175         hclge_get_misc_vector(hdev);
3176
3177         /* this irq is explicitly freed in hclge_misc_irq_uninit() */
3178         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3179                           0, "hclge_misc", hdev);
3180         if (ret) {
3181                 hclge_free_vector(hdev, 0);
3182                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3183                         hdev->misc_vector.vector_irq);
3184         }
3185
3186         return ret;
3187 }
3188
3189 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3190 {
3191         free_irq(hdev->misc_vector.vector_irq, hdev);
3192         hclge_free_vector(hdev, 0);
3193 }
3194
3195 int hclge_notify_client(struct hclge_dev *hdev,
3196                         enum hnae3_reset_notify_type type)
3197 {
3198         struct hnae3_client *client = hdev->nic_client;
3199         u16 i;
3200
3201         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3202                 return 0;
3203
3204         if (!client->ops->reset_notify)
3205                 return -EOPNOTSUPP;
3206
3207         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3208                 struct hnae3_handle *handle = &hdev->vport[i].nic;
3209                 int ret;
3210
3211                 ret = client->ops->reset_notify(handle, type);
3212                 if (ret) {
3213                         dev_err(&hdev->pdev->dev,
3214                                 "notify nic client failed %d(%d)\n", type, ret);
3215                         return ret;
3216                 }
3217         }
3218
3219         return 0;
3220 }
3221
3222 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3223                                     enum hnae3_reset_notify_type type)
3224 {
3225         struct hnae3_client *client = hdev->roce_client;
3226         int ret = 0;
3227         u16 i;
3228
3229         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3230                 return 0;
3231
3232         if (!client->ops->reset_notify)
3233                 return -EOPNOTSUPP;
3234
3235         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3236                 struct hnae3_handle *handle = &hdev->vport[i].roce;
3237
3238                 ret = client->ops->reset_notify(handle, type);
3239                 if (ret) {
3240                         dev_err(&hdev->pdev->dev,
3241                                 "notify roce client failed %d(%d)",
3242                                 type, ret);
3243                         return ret;
3244                 }
3245         }
3246
3247         return ret;
3248 }
3249
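     /* Poll the hardware until the reset indicated by hdev->reset_type has
      * completed, sleeping between reads and giving up after
      * HCLGE_RESET_WAIT_CNT iterations. FLR completion is tracked through
      * hdev->flr_state instead of a hardware status register.
      */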
3250 static int hclge_reset_wait(struct hclge_dev *hdev)
3251 {
3252 #define HCLGE_RESET_WAIT_MS     100
3253 #define HCLGE_RESET_WAIT_CNT    200
3254         u32 val, reg, reg_bit;
3255         u32 cnt = 0;
3256
3257         switch (hdev->reset_type) {
3258         case HNAE3_IMP_RESET:
3259                 reg = HCLGE_GLOBAL_RESET_REG;
3260                 reg_bit = HCLGE_IMP_RESET_BIT;
3261                 break;
3262         case HNAE3_GLOBAL_RESET:
3263                 reg = HCLGE_GLOBAL_RESET_REG;
3264                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3265                 break;
3266         case HNAE3_FUNC_RESET:
3267                 reg = HCLGE_FUN_RST_ING;
3268                 reg_bit = HCLGE_FUN_RST_ING_B;
3269                 break;
3270         case HNAE3_FLR_RESET:
3271                 break;
3272         default:
3273                 dev_err(&hdev->pdev->dev,
3274                         "Wait for unsupported reset type: %d\n",
3275                         hdev->reset_type);
3276                 return -EINVAL;
3277         }
3278
3279         if (hdev->reset_type == HNAE3_FLR_RESET) {
3280                 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3281                        cnt++ < HCLGE_RESET_WAIT_CNT)
3282                         msleep(HCLGE_RESET_WAIT_MS);
3283
3284                 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3285                         dev_err(&hdev->pdev->dev,
3286                                 "flr wait timeout: %u\n", cnt);
3287                         return -EBUSY;
3288                 }
3289
3290                 return 0;
3291         }
3292
3293         val = hclge_read_dev(&hdev->hw, reg);
3294         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3295                 msleep(HCLGE_RESET_WAIT_MS);
3296                 val = hclge_read_dev(&hdev->hw, reg);
3297                 cnt++;
3298         }
3299
3300         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3301                 dev_warn(&hdev->pdev->dev,
3302                          "Wait for reset timeout: %d\n", hdev->reset_type);
3303                 return -EBUSY;
3304         }
3305
3306         return 0;
3307 }
3308
3309 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3310 {
3311         struct hclge_vf_rst_cmd *req;
3312         struct hclge_desc desc;
3313
3314         req = (struct hclge_vf_rst_cmd *)desc.data;
3315         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3316         req->dest_vfid = func_id;
3317
3318         if (reset)
3319                 req->vf_rst = 0x1;
3320
3321         return hclge_cmd_send(&hdev->hw, &desc, 1);
3322 }
3323
3324 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3325 {
3326         int i;
3327
3328         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3329                 struct hclge_vport *vport = &hdev->vport[i];
3330                 int ret;
3331
3332                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3333                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3334                 if (ret) {
3335                         dev_err(&hdev->pdev->dev,
3336                                 "set vf(%u) rst failed %d!\n",
3337                                 vport->vport_id, ret);
3338                         return ret;
3339                 }
3340
3341                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3342                         continue;
3343
3344                 /* Inform VF to process the reset.
3345                  * hclge_inform_reset_assert_to_vf may fail if VF
3346                  * driver is not loaded.
3347                  */
3348                 ret = hclge_inform_reset_assert_to_vf(vport);
3349                 if (ret)
3350                         dev_warn(&hdev->pdev->dev,
3351                                  "inform reset to vf(%u) failed %d!\n",
3352                                  vport->vport_id, ret);
3353         }
3354
3355         return 0;
3356 }
3357
3358 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3359 {
3360         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3361             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3362             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3363                 return;
3364
3365         hclge_mbx_handler(hdev);
3366
3367         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3368 }
3369
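     /* Before asserting a PF or FLR reset, poll the firmware until all
      * running VFs report that they have stopped IO, servicing the mailbox
      * in between so that VFs can be told to bring their netdevs down.
      */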
3370 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3371 {
3372         struct hclge_pf_rst_sync_cmd *req;
3373         struct hclge_desc desc;
3374         int cnt = 0;
3375         int ret;
3376
3377         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3378         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3379
3380         do {
3381                 /* VFs need to down their netdev by mbx during PF or FLR reset */
3382                 hclge_mailbox_service_task(hdev);
3383
3384                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3385                 /* for compatibility with old firmware, wait
3386                  * 100 ms for the VF to stop IO
3387                  */
3388                 if (ret == -EOPNOTSUPP) {
3389                         msleep(HCLGE_RESET_SYNC_TIME);
3390                         return 0;
3391                 } else if (ret) {
3392                         dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3393                                 ret);
3394                         return ret;
3395                 } else if (req->all_vf_ready) {
3396                         return 0;
3397                 }
3398                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3399                 hclge_cmd_reuse_desc(&desc, true);
3400         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3401
3402         dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3403         return -ETIME;
3404 }
3405
3406 void hclge_report_hw_error(struct hclge_dev *hdev,
3407                            enum hnae3_hw_error_type type)
3408 {
3409         struct hnae3_client *client = hdev->nic_client;
3410         u16 i;
3411
3412         if (!client || !client->ops->process_hw_error ||
3413             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3414                 return;
3415
3416         for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3417                 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3418 }
3419
3420 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3421 {
3422         u32 reg_val;
3423
3424         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3425         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3426                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3427                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3428                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3429         }
3430
3431         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3432                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3433                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3434                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3435         }
3436 }
3437
3438 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3439 {
3440         struct hclge_desc desc;
3441         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3442         int ret;
3443
3444         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3445         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3446         req->fun_reset_vfid = func_id;
3447
3448         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3449         if (ret)
3450                 dev_err(&hdev->pdev->dev,
3451                         "send function reset cmd fail, status =%d\n", ret);
3452
3453         return ret;
3454 }
3455
3456 static void hclge_do_reset(struct hclge_dev *hdev)
3457 {
3458         struct hnae3_handle *handle = &hdev->vport[0].nic;
3459         struct pci_dev *pdev = hdev->pdev;
3460         u32 val;
3461
3462         if (hclge_get_hw_reset_stat(handle)) {
3463                 dev_info(&pdev->dev, "Hardware reset not finished\n");
3464                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3465                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3466                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3467                 return;
3468         }
3469
3470         switch (hdev->reset_type) {
3471         case HNAE3_GLOBAL_RESET:
3472                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3473                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3474                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3475                 dev_info(&pdev->dev, "Global Reset requested\n");
3476                 break;
3477         case HNAE3_FUNC_RESET:
3478                 dev_info(&pdev->dev, "PF Reset requested\n");
3479                 /* schedule again to check later */
3480                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3481                 hclge_reset_task_schedule(hdev);
3482                 break;
3483         case HNAE3_FLR_RESET:
3484                 dev_info(&pdev->dev, "FLR requested\n");
3485                 /* schedule again to check later */
3486                 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3487                 hclge_reset_task_schedule(hdev);
3488                 break;
3489         default:
3490                 dev_warn(&pdev->dev,
3491                          "Unsupported reset type: %d\n", hdev->reset_type);
3492                 break;
3493         }
3494 }
3495
3496 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3497                                                    unsigned long *addr)
3498 {
3499         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3500         struct hclge_dev *hdev = ae_dev->priv;
3501
3502         /* first, resolve any unknown reset type to the known type(s) */
3503         if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3504                 /* we will intentionally ignore any errors from this function
3505                  * as we will end up in *some* reset request in any case
3506                  */
3507                 hclge_handle_hw_msix_error(hdev, addr);
3508                 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3509                 /* We deferred the clearing of the error event which caused the
3510                  * interrupt since it was not possible to do that in
3511                  * interrupt context (and this is the reason we introduced the
3512                  * new UNKNOWN reset type). Now that the errors have been
3513                  * handled and cleared in hardware, we can safely enable
3514                  * interrupts. This is an exception to the norm.
3515                  */
3516                 hclge_enable_vector(&hdev->misc_vector, true);
3517         }
3518
3519         /* return the highest priority reset level amongst all */
3520         if (test_bit(HNAE3_IMP_RESET, addr)) {
3521                 rst_level = HNAE3_IMP_RESET;
3522                 clear_bit(HNAE3_IMP_RESET, addr);
3523                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3524                 clear_bit(HNAE3_FUNC_RESET, addr);
3525         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3526                 rst_level = HNAE3_GLOBAL_RESET;
3527                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3528                 clear_bit(HNAE3_FUNC_RESET, addr);
3529         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3530                 rst_level = HNAE3_FUNC_RESET;
3531                 clear_bit(HNAE3_FUNC_RESET, addr);
3532         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3533                 rst_level = HNAE3_FLR_RESET;
3534                 clear_bit(HNAE3_FLR_RESET, addr);
3535         }
3536
3537         if (hdev->reset_type != HNAE3_NONE_RESET &&
3538             rst_level < hdev->reset_type)
3539                 return HNAE3_NONE_RESET;
3540
3541         return rst_level;
3542 }
3543
3544 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3545 {
3546         u32 clearval = 0;
3547
3548         switch (hdev->reset_type) {
3549         case HNAE3_IMP_RESET:
3550                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3551                 break;
3552         case HNAE3_GLOBAL_RESET:
3553                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3554                 break;
3555         default:
3556                 break;
3557         }
3558
3559         if (!clearval)
3560                 return;
3561
3562         /* For revision 0x20, the reset interrupt source
3563          * can only be cleared after the hardware reset is done
3564          */
3565         if (hdev->pdev->revision == 0x20)
3566                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3567                                 clearval);
3568
3569         hclge_enable_vector(&hdev->misc_vector, true);
3570 }
3571
3572 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3573 {
3574         int ret = 0;
3575
3576         switch (hdev->reset_type) {
3577         case HNAE3_FUNC_RESET:
3578                 /* fall through */
3579         case HNAE3_FLR_RESET:
3580                 ret = hclge_set_all_vf_rst(hdev, true);
3581                 break;
3582         default:
3583                 break;
3584         }
3585
3586         return ret;
3587 }
3588
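     /* Inform hardware whether the driver is ready for the reset to
      * proceed by toggling the HCLGE_NIC_SW_RST_RDY bit in the NIC CSQ
      * depth register.
      */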
3589 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3590 {
3591         u32 reg_val;
3592
3593         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3594         if (enable)
3595                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3596         else
3597                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3598
3599         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3600 }
3601
3602 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3603 {
3604         u32 reg_val;
3605         int ret = 0;
3606
3607         switch (hdev->reset_type) {
3608         case HNAE3_FUNC_RESET:
3609                 /* confirm that all running VFs are ready
3610                  * before requesting the PF reset
3611                  */
3612                 ret = hclge_func_reset_sync_vf(hdev);
3613                 if (ret)
3614                         return ret;
3615
3616                 ret = hclge_func_reset_cmd(hdev, 0);
3617                 if (ret) {
3618                         dev_err(&hdev->pdev->dev,
3619                                 "asserting function reset fail %d!\n", ret);
3620                         return ret;
3621                 }
3622
3623                 /* After performing the PF reset, it is not necessary to do the
3624                  * mailbox handling or send any command to firmware, because
3625                  * any mailbox handling or command to firmware is only valid
3626                  * after hclge_cmd_init is called.
3627                  */
3628                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3629                 hdev->rst_stats.pf_rst_cnt++;
3630                 break;
3631         case HNAE3_FLR_RESET:
3632                 /* confirm that all running VFs are ready
3633                  * before requesting the PF reset
3634                  */
3635                 ret = hclge_func_reset_sync_vf(hdev);
3636                 if (ret)
3637                         return ret;
3638
3639                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3640                 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3641                 hdev->rst_stats.flr_rst_cnt++;
3642                 break;
3643         case HNAE3_IMP_RESET:
3644                 hclge_handle_imp_error(hdev);
3645                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3646                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3647                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3648                 break;
3649         default:
3650                 break;
3651         }
3652
3653         /* inform hardware that preparatory work is done */
3654         msleep(HCLGE_RESET_SYNC_TIME);
3655         hclge_reset_handshake(hdev, true);
3656         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3657
3658         return ret;
3659 }
3660
3661 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3662 {
3663 #define MAX_RESET_FAIL_CNT 5
3664
3665         if (hdev->reset_pending) {
3666                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3667                          hdev->reset_pending);
3668                 return true;
3669         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3670                    HCLGE_RESET_INT_M) {
3671                 dev_info(&hdev->pdev->dev,
3672                          "reset failed because new reset interrupt\n");
3673                 hclge_clear_reset_cause(hdev);
3674                 return false;
3675         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3676                 hdev->rst_stats.reset_fail_cnt++;
3677                 set_bit(hdev->reset_type, &hdev->reset_pending);
3678                 dev_info(&hdev->pdev->dev,
3679                          "re-schedule reset task(%u)\n",
3680                          hdev->rst_stats.reset_fail_cnt);
3681                 return true;
3682         }
3683
3684         hclge_clear_reset_cause(hdev);
3685
3686         /* recover the handshake status when reset fails */
3687         hclge_reset_handshake(hdev, true);
3688
3689         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3690
3691         hclge_dbg_dump_rst_info(hdev);
3692
3693         return false;
3694 }
3695
3696 static int hclge_set_rst_done(struct hclge_dev *hdev)
3697 {
3698         struct hclge_pf_rst_done_cmd *req;
3699         struct hclge_desc desc;
3700         int ret;
3701
3702         req = (struct hclge_pf_rst_done_cmd *)desc.data;
3703         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3704         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3705
3706         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3707         /* To be compatible with the old firmware, which does not support
3708          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3709          * return success
3710          */
3711         if (ret == -EOPNOTSUPP) {
3712                 dev_warn(&hdev->pdev->dev,
3713                          "current firmware does not support command(0x%x)!\n",
3714                          HCLGE_OPC_PF_RST_DONE);
3715                 return 0;
3716         } else if (ret) {
3717                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3718                         ret);
3719         }
3720
3721         return ret;
3722 }
3723
3724 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3725 {
3726         int ret = 0;
3727
3728         switch (hdev->reset_type) {
3729         case HNAE3_FUNC_RESET:
3730                 /* fall through */
3731         case HNAE3_FLR_RESET:
3732                 ret = hclge_set_all_vf_rst(hdev, false);
3733                 break;
3734         case HNAE3_GLOBAL_RESET:
3735                 /* fall through */
3736         case HNAE3_IMP_RESET:
3737                 ret = hclge_set_rst_done(hdev);
3738                 break;
3739         default:
3740                 break;
3741         }
3742
3743         /* clear up the handshake status after re-initialization is done */
3744         hclge_reset_handshake(hdev, false);
3745
3746         return ret;
3747 }
3748
3749 static int hclge_reset_stack(struct hclge_dev *hdev)
3750 {
3751         int ret;
3752
3753         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3754         if (ret)
3755                 return ret;
3756
3757         ret = hclge_reset_ae_dev(hdev->ae_dev);
3758         if (ret)
3759                 return ret;
3760
3761         ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3762         if (ret)
3763                 return ret;
3764
3765         return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3766 }
3767
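     /* Top-level reset handler: notify clients down, prepare and assert the
      * reset, wait for hardware to finish, re-initialize the ae_dev and the
      * stack under rtnl_lock, then bring clients back up. On failure,
      * hclge_reset_err_handle() decides whether to re-schedule the reset.
      */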
3768 static void hclge_reset(struct hclge_dev *hdev)
3769 {
3770         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3771         enum hnae3_reset_type reset_level;
3772         int ret;
3773
3774         /* Initialize ae_dev reset status as well, in case enet layer wants to
3775          * know if device is undergoing reset
3776          */
3777         ae_dev->reset_type = hdev->reset_type;
3778         hdev->rst_stats.reset_cnt++;
3779         /* perform reset of the stack & ae device for a client */
3780         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3781         if (ret)
3782                 goto err_reset;
3783
3784         ret = hclge_reset_prepare_down(hdev);
3785         if (ret)
3786                 goto err_reset;
3787
3788         rtnl_lock();
3789         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3790         if (ret)
3791                 goto err_reset_lock;
3792
3793         rtnl_unlock();
3794
3795         ret = hclge_reset_prepare_wait(hdev);
3796         if (ret)
3797                 goto err_reset;
3798
3799         if (hclge_reset_wait(hdev))
3800                 goto err_reset;
3801
3802         hdev->rst_stats.hw_reset_done_cnt++;
3803
3804         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3805         if (ret)
3806                 goto err_reset;
3807
3808         rtnl_lock();
3809
3810         ret = hclge_reset_stack(hdev);
3811         if (ret)
3812                 goto err_reset_lock;
3813
3814         hclge_clear_reset_cause(hdev);
3815
3816         ret = hclge_reset_prepare_up(hdev);
3817         if (ret)
3818                 goto err_reset_lock;
3819
3820         rtnl_unlock();
3821
3822         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3823         /* ignore the RoCE client init notify error once the reset has already
3824          * failed HCLGE_RESET_MAX_FAIL_CNT - 1 times
3825          */
3826         if (ret &&
3827             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3828                 goto err_reset;
3829
3830         rtnl_lock();
3831
3832         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3833         if (ret)
3834                 goto err_reset_lock;
3835
3836         rtnl_unlock();
3837
3838         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3839         if (ret)
3840                 goto err_reset;
3841
3842         hdev->last_reset_time = jiffies;
3843         hdev->rst_stats.reset_fail_cnt = 0;
3844         hdev->rst_stats.reset_done_cnt++;
3845         ae_dev->reset_type = HNAE3_NONE_RESET;
3846
3847         /* if default_reset_request has a higher level reset request,
3848          * it should be handled as soon as possible, since some errors
3849          * need this kind of reset to be fixed.
3850          */
3851         reset_level = hclge_get_reset_level(ae_dev,
3852                                             &hdev->default_reset_request);
3853         if (reset_level != HNAE3_NONE_RESET)
3854                 set_bit(reset_level, &hdev->reset_request);
3855
3856         return;
3857
3858 err_reset_lock:
3859         rtnl_unlock();
3860 err_reset:
3861         if (hclge_reset_err_handle(hdev))
3862                 hclge_reset_task_schedule(hdev);
3863 }
3864
3865 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3866 {
3867         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3868         struct hclge_dev *hdev = ae_dev->priv;
3869
3870         /* We might end up getting called broadly because of the 2 cases below:
3871          * 1. A recoverable error was conveyed through APEI and the only way
3872          *    to bring back normalcy is to reset.
3873          * 2. A new reset request from the stack due to timeout.
3874          *
3875          * For the first case, the error event might not have an ae handle
3876          * available. Check if this is a new reset request and we are not here
3877          * just because the last reset attempt did not succeed and the watchdog
3878          * hit us again. We will know this if the last reset request did not
3879          * occur very recently (watchdog timer = 5 * HZ; check after a
3880          * sufficiently long time, say 4 * 5 * HZ). In case of a new request
3881          * we reset the "reset level" to PF reset. And if it is a repeat of the
3882          * most recent reset request then we want to throttle it, so we will
3883          * not allow it again before 3 * HZ has passed.
3884          */
3885         if (!handle)
3886                 handle = &hdev->vport[0].nic;
3887
3888         if (time_before(jiffies, (hdev->last_reset_time +
3889                                   HCLGE_RESET_INTERVAL))) {
3890                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3891                 return;
3892         } else if (hdev->default_reset_request) {
3893                 hdev->reset_level =
3894                         hclge_get_reset_level(ae_dev,
3895                                               &hdev->default_reset_request);
3896         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3897                 hdev->reset_level = HNAE3_FUNC_RESET;
3898         }
3899
3900         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3901                  hdev->reset_level);
3902
3903         /* request reset & schedule reset task */
3904         set_bit(hdev->reset_level, &hdev->reset_request);
3905         hclge_reset_task_schedule(hdev);
3906
3907         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3908                 hdev->reset_level++;
3909 }
3910
3911 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3912                                         enum hnae3_reset_type rst_type)
3913 {
3914         struct hclge_dev *hdev = ae_dev->priv;
3915
3916         set_bit(rst_type, &hdev->default_reset_request);
3917 }
3918
3919 static void hclge_reset_timer(struct timer_list *t)
3920 {
3921         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3922
3923         /* if default_reset_request has no value, it means that this reset
3924          * request has already been handled, so just return here
3925          */
3926         if (!hdev->default_reset_request)
3927                 return;
3928
3929         dev_info(&hdev->pdev->dev,
3930                  "triggering reset in reset timer\n");
3931         hclge_reset_event(hdev->pdev, NULL);
3932 }
3933
3934 static void hclge_reset_subtask(struct hclge_dev *hdev)
3935 {
3936         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3937
3938         /* check if there is any ongoing reset in the hardware. This status can
3939          * be checked from reset_pending. If there is, then we need to wait for
3940          * hardware to complete the reset.
3941          *    a. If we are able to figure out in reasonable time that hardware
3942          *       has fully reset, then we can proceed with the driver and client
3943          *       reset.
3944          *    b. Else, we can come back later to check this status, so re-sched
3945          *       now.
3946          */
3947         hdev->last_reset_time = jiffies;
3948         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3949         if (hdev->reset_type != HNAE3_NONE_RESET)
3950                 hclge_reset(hdev);
3951
3952         /* check if we got any *new* reset requests to be honored */
3953         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3954         if (hdev->reset_type != HNAE3_NONE_RESET)
3955                 hclge_do_reset(hdev);
3956
3957         hdev->reset_type = HNAE3_NONE_RESET;
3958 }
3959
3960 static void hclge_reset_service_task(struct hclge_dev *hdev)
3961 {
3962         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3963                 return;
3964
3965         if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3966                 return;
3967
3968         hclge_reset_subtask(hdev);
3969
3970         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3971 }
3972
3973 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3974 {
3975         int i;
3976
3977         /* start from vport 1, because the PF (vport 0) is always alive */
3978         for (i = 1; i < hdev->num_alloc_vport; i++) {
3979                 struct hclge_vport *vport = &hdev->vport[i];
3980
3981                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3982                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3983
3984                 /* If vf is not alive, set to default value */
3985                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3986                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3987         }
3988 }
3989
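     /* Periodic work: the link state is refreshed on every invocation, but
      * the heavier updates (stats, vport alive, port info, VLAN and ARFS
      * sync) are throttled to roughly once per second based on
      * last_serv_processed.
      */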
3990 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3991 {
3992         unsigned long delta = round_jiffies_relative(HZ);
3993
3994         /* Always handle the link updating to make sure link state is
3995          * updated when it is triggered by mbx.
3996          */
3997         hclge_update_link_status(hdev);
3998
3999         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4000                 delta = jiffies - hdev->last_serv_processed;
4001
4002                 if (delta < round_jiffies_relative(HZ)) {
4003                         delta = round_jiffies_relative(HZ) - delta;
4004                         goto out;
4005                 }
4006         }
4007
4008         hdev->serv_processed_cnt++;
4009         hclge_update_vport_alive(hdev);
4010
4011         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4012                 hdev->last_serv_processed = jiffies;
4013                 goto out;
4014         }
4015
4016         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4017                 hclge_update_stats_for_all(hdev);
4018
4019         hclge_update_port_info(hdev);
4020         hclge_sync_vlan_filter(hdev);
4021
4022         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4023                 hclge_rfs_filter_expire(hdev);
4024
4025         hdev->last_serv_processed = jiffies;
4026
4027 out:
4028         hclge_task_schedule(hdev, delta);
4029 }
4030
4031 static void hclge_service_task(struct work_struct *work)
4032 {
4033         struct hclge_dev *hdev =
4034                 container_of(work, struct hclge_dev, service_task.work);
4035
4036         hclge_reset_service_task(hdev);
4037         hclge_mailbox_service_task(hdev);
4038         hclge_periodic_service_task(hdev);
4039
4040         /* Handle reset and mbx again in case periodical task delays the
4041          * handling by calling hclge_task_schedule() in
4042          * hclge_periodic_service_task().
4043          */
4044         hclge_reset_service_task(hdev);
4045         hclge_mailbox_service_task(hdev);
4046 }
4047
4048 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4049 {
4050         /* VF handle has no client */
4051         if (!handle->client)
4052                 return container_of(handle, struct hclge_vport, nic);
4053         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4054                 return container_of(handle, struct hclge_vport, roce);
4055         else
4056                 return container_of(handle, struct hclge_vport, nic);
4057 }
4058
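     /* Allocate MSI-X vectors for a vport. Vector 0 is reserved for the
      * misc (reset/mailbox) interrupt, so the search starts at index 1.
      * Returns the number of vectors actually allocated, which may be less
      * than requested.
      */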
4059 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4060                             struct hnae3_vector_info *vector_info)
4061 {
4062         struct hclge_vport *vport = hclge_get_vport(handle);
4063         struct hnae3_vector_info *vector = vector_info;
4064         struct hclge_dev *hdev = vport->back;
4065         int alloc = 0;
4066         int i, j;
4067
4068         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4069         vector_num = min(hdev->num_msi_left, vector_num);
4070
4071         for (j = 0; j < vector_num; j++) {
4072                 for (i = 1; i < hdev->num_msi; i++) {
4073                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4074                                 vector->vector = pci_irq_vector(hdev->pdev, i);
4075                                 vector->io_addr = hdev->hw.io_base +
4076                                         HCLGE_VECTOR_REG_BASE +
4077                                         (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4078                                         vport->vport_id *
4079                                         HCLGE_VECTOR_VF_OFFSET;
4080                                 hdev->vector_status[i] = vport->vport_id;
4081                                 hdev->vector_irq[i] = vector->vector;
4082
4083                                 vector++;
4084                                 alloc++;
4085
4086                                 break;
4087                         }
4088                 }
4089         }
4090         hdev->num_msi_left -= alloc;
4091         hdev->num_msi_used += alloc;
4092
4093         return alloc;
4094 }
4095
4096 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4097 {
4098         int i;
4099
4100         for (i = 0; i < hdev->num_msi; i++)
4101                 if (vector == hdev->vector_irq[i])
4102                         return i;
4103
4104         return -EINVAL;
4105 }
4106
4107 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4108 {
4109         struct hclge_vport *vport = hclge_get_vport(handle);
4110         struct hclge_dev *hdev = vport->back;
4111         int vector_id;
4112
4113         vector_id = hclge_get_vector_index(hdev, vector);
4114         if (vector_id < 0) {
4115                 dev_err(&hdev->pdev->dev,
4116                         "Get vector index fail. vector_id =%d\n", vector_id);
4117                 return vector_id;
4118         }
4119
4120         hclge_free_vector(hdev, vector_id);
4121
4122         return 0;
4123 }
4124
4125 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4126 {
4127         return HCLGE_RSS_KEY_SIZE;
4128 }
4129
4130 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4131 {
4132         return HCLGE_RSS_IND_TBL_SIZE;
4133 }
4134
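     /* Program the RSS hash algorithm and hash key. The key is written in
      * chunks of HCLGE_RSS_HASH_KEY_NUM bytes, one command descriptor per
      * chunk, with the chunk index carried in the hash_config field.
      */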
4135 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4136                                   const u8 hfunc, const u8 *key)
4137 {
4138         struct hclge_rss_config_cmd *req;
4139         unsigned int key_offset = 0;
4140         struct hclge_desc desc;
4141         int key_counts;
4142         int key_size;
4143         int ret;
4144
4145         key_counts = HCLGE_RSS_KEY_SIZE;
4146         req = (struct hclge_rss_config_cmd *)desc.data;
4147
4148         while (key_counts) {
4149                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4150                                            false);
4151
4152                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4153                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4154
4155                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4156                 memcpy(req->hash_key,
4157                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4158
4159                 key_counts -= key_size;
4160                 key_offset++;
4161                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4162                 if (ret) {
4163                         dev_err(&hdev->pdev->dev,
4164                                 "Configure RSS config fail, status = %d\n",
4165                                 ret);
4166                         return ret;
4167                 }
4168         }
4169         return 0;
4170 }
4171
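     /* Write the RSS indirection table to hardware in chunks of
      * HCLGE_RSS_CFG_TBL_SIZE entries, one command descriptor per chunk.
      */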
4172 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4173 {
4174         struct hclge_rss_indirection_table_cmd *req;
4175         struct hclge_desc desc;
4176         int i, j;
4177         int ret;
4178
4179         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4180
4181         for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4182                 hclge_cmd_setup_basic_desc
4183                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4184
4185                 req->start_table_index =
4186                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4187                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4188
4189                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4190                         req->rss_result[j] =
4191                                 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4192
4193                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4194                 if (ret) {
4195                         dev_err(&hdev->pdev->dev,
4196                                 "Configure rss indir table fail, status = %d\n",
4197                                 ret);
4198                         return ret;
4199                 }
4200         }
4201         return 0;
4202 }
4203
4204 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4205                                  u16 *tc_size, u16 *tc_offset)
4206 {
4207         struct hclge_rss_tc_mode_cmd *req;
4208         struct hclge_desc desc;
4209         int ret;
4210         int i;
4211
4212         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4213         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4214
4215         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4216                 u16 mode = 0;
4217
4218                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4219                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4220                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4221                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4222                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4223
4224                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4225         }
4226
4227         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4228         if (ret)
4229                 dev_err(&hdev->pdev->dev,
4230                         "Configure rss tc mode fail, status = %d\n", ret);
4231
4232         return ret;
4233 }
4234
4235 static void hclge_get_rss_type(struct hclge_vport *vport)
4236 {
4237         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4238             vport->rss_tuple_sets.ipv4_udp_en ||
4239             vport->rss_tuple_sets.ipv4_sctp_en ||
4240             vport->rss_tuple_sets.ipv6_tcp_en ||
4241             vport->rss_tuple_sets.ipv6_udp_en ||
4242             vport->rss_tuple_sets.ipv6_sctp_en)
4243                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4244         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4245                  vport->rss_tuple_sets.ipv6_fragment_en)
4246                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4247         else
4248                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4249 }
4250
4251 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4252 {
4253         struct hclge_rss_input_tuple_cmd *req;
4254         struct hclge_desc desc;
4255         int ret;
4256
4257         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4258
4259         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4260
4261         /* Get the tuple cfg from pf */
4262         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4263         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4264         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4265         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4266         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4267         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4268         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4269         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4270         hclge_get_rss_type(&hdev->vport[0]);
4271         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4272         if (ret)
4273                 dev_err(&hdev->pdev->dev,
4274                         "Configure rss input fail, status = %d\n", ret);
4275         return ret;
4276 }
4277
4278 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4279                          u8 *key, u8 *hfunc)
4280 {
4281         struct hclge_vport *vport = hclge_get_vport(handle);
4282         int i;
4283
4284         /* Get hash algorithm */
4285         if (hfunc) {
4286                 switch (vport->rss_algo) {
4287                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4288                         *hfunc = ETH_RSS_HASH_TOP;
4289                         break;
4290                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4291                         *hfunc = ETH_RSS_HASH_XOR;
4292                         break;
4293                 default:
4294                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4295                         break;
4296                 }
4297         }
4298
4299         /* Get the RSS Key required by the user */
4300         if (key)
4301                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4302
4303         /* Get indirect table */
4304         if (indir)
4305                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4306                         indir[i] = vport->rss_indirection_tbl[i];
4307
4308         return 0;
4309 }
4310
4311 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4312                          const u8 *key, const u8 hfunc)
4313 {
4314         struct hclge_vport *vport = hclge_get_vport(handle);
4315         struct hclge_dev *hdev = vport->back;
4316         u8 hash_algo;
4317         int ret, i;
4318
4319         /* Set the RSS Hash Key if specified by the user */
4320         if (key) {
4321                 switch (hfunc) {
4322                 case ETH_RSS_HASH_TOP:
4323                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4324                         break;
4325                 case ETH_RSS_HASH_XOR:
4326                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4327                         break;
4328                 case ETH_RSS_HASH_NO_CHANGE:
4329                         hash_algo = vport->rss_algo;
4330                         break;
4331                 default:
4332                         return -EINVAL;
4333                 }
4334
4335                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4336                 if (ret)
4337                         return ret;
4338
4339                 /* Update the shadow RSS key with the user specified key */
4340                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4341                 vport->rss_algo = hash_algo;
4342         }
4343
4344         /* Update the shadow RSS table with user specified qids */
4345         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4346                 vport->rss_indirection_tbl[i] = indir[i];
4347
4348         /* Update the hardware */
4349         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4350 }
4351
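/* Translate the ethtool RXH_* hash fields into the hardware tuple bits
 * (HCLGE_S/D_PORT_BIT, HCLGE_S/D_IP_BIT); SCTP flows additionally hash on
 * the verification tag (HCLGE_V_TAG_BIT).
 */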
4352 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4353 {
4354         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4355
4356         if (nfc->data & RXH_L4_B_2_3)
4357                 hash_sets |= HCLGE_D_PORT_BIT;
4358         else
4359                 hash_sets &= ~HCLGE_D_PORT_BIT;
4360
4361         if (nfc->data & RXH_IP_SRC)
4362                 hash_sets |= HCLGE_S_IP_BIT;
4363         else
4364                 hash_sets &= ~HCLGE_S_IP_BIT;
4365
4366         if (nfc->data & RXH_IP_DST)
4367                 hash_sets |= HCLGE_D_IP_BIT;
4368         else
4369                 hash_sets &= ~HCLGE_D_IP_BIT;
4370
4371         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4372                 hash_sets |= HCLGE_V_TAG_BIT;
4373
4374         return hash_sets;
4375 }
4376
4377 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4378                                struct ethtool_rxnfc *nfc)
4379 {
4380         struct hclge_vport *vport = hclge_get_vport(handle);
4381         struct hclge_dev *hdev = vport->back;
4382         struct hclge_rss_input_tuple_cmd *req;
4383         struct hclge_desc desc;
4384         u8 tuple_sets;
4385         int ret;
4386
4387         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4388                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4389                 return -EINVAL;
4390
4391         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4392         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4393
4394         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4395         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4396         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4397         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4398         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4399         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4400         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4401         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4402
4403         tuple_sets = hclge_get_rss_hash_bits(nfc);
4404         switch (nfc->flow_type) {
4405         case TCP_V4_FLOW:
4406                 req->ipv4_tcp_en = tuple_sets;
4407                 break;
4408         case TCP_V6_FLOW:
4409                 req->ipv6_tcp_en = tuple_sets;
4410                 break;
4411         case UDP_V4_FLOW:
4412                 req->ipv4_udp_en = tuple_sets;
4413                 break;
4414         case UDP_V6_FLOW:
4415                 req->ipv6_udp_en = tuple_sets;
4416                 break;
4417         case SCTP_V4_FLOW:
4418                 req->ipv4_sctp_en = tuple_sets;
4419                 break;
4420         case SCTP_V6_FLOW:
4421                 if ((nfc->data & RXH_L4_B_0_1) ||
4422                     (nfc->data & RXH_L4_B_2_3))
4423                         return -EINVAL;
4424
4425                 req->ipv6_sctp_en = tuple_sets;
4426                 break;
4427         case IPV4_FLOW:
4428                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4429                 break;
4430         case IPV6_FLOW:
4431                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4432                 break;
4433         default:
4434                 return -EINVAL;
4435         }
4436
4437         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4438         if (ret) {
4439                 dev_err(&hdev->pdev->dev,
4440                         "Set rss tuple fail, status = %d\n", ret);
4441                 return ret;
4442         }
4443
4444         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4445         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4446         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4447         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4448         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4449         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4450         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4451         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4452         hclge_get_rss_type(vport);
4453         return 0;
4454 }
4455
4456 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4457                                struct ethtool_rxnfc *nfc)
4458 {
4459         struct hclge_vport *vport = hclge_get_vport(handle);
4460         u8 tuple_sets;
4461
4462         nfc->data = 0;
4463
4464         switch (nfc->flow_type) {
4465         case TCP_V4_FLOW:
4466                 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4467                 break;
4468         case UDP_V4_FLOW:
4469                 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4470                 break;
4471         case TCP_V6_FLOW:
4472                 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4473                 break;
4474         case UDP_V6_FLOW:
4475                 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4476                 break;
4477         case SCTP_V4_FLOW:
4478                 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4479                 break;
4480         case SCTP_V6_FLOW:
4481                 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4482                 break;
4483         case IPV4_FLOW:
4484         case IPV6_FLOW:
4485                 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4486                 break;
4487         default:
4488                 return -EINVAL;
4489         }
4490
4491         if (!tuple_sets)
4492                 return 0;
4493
4494         if (tuple_sets & HCLGE_D_PORT_BIT)
4495                 nfc->data |= RXH_L4_B_2_3;
4496         if (tuple_sets & HCLGE_S_PORT_BIT)
4497                 nfc->data |= RXH_L4_B_0_1;
4498         if (tuple_sets & HCLGE_D_IP_BIT)
4499                 nfc->data |= RXH_IP_DST;
4500         if (tuple_sets & HCLGE_S_IP_BIT)
4501                 nfc->data |= RXH_IP_SRC;
4502
4503         return 0;
4504 }
4505
4506 static int hclge_get_tc_size(struct hnae3_handle *handle)
4507 {
4508         struct hclge_vport *vport = hclge_get_vport(handle);
4509         struct hclge_dev *hdev = vport->back;
4510
4511         return hdev->rss_size_max;
4512 }
4513
4514 int hclge_rss_init_hw(struct hclge_dev *hdev)
4515 {
4516         struct hclge_vport *vport = hdev->vport;
4517         u8 *rss_indir = vport[0].rss_indirection_tbl;
4518         u16 rss_size = vport[0].alloc_rss_size;
4519         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4520         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4521         u8 *key = vport[0].rss_hash_key;
4522         u8 hfunc = vport[0].rss_algo;
4523         u16 tc_valid[HCLGE_MAX_TC_NUM];
4524         u16 roundup_size;
4525         unsigned int i;
4526         int ret;
4527
4528         ret = hclge_set_rss_indir_table(hdev, rss_indir);
4529         if (ret)
4530                 return ret;
4531
4532         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4533         if (ret)
4534                 return ret;
4535
4536         ret = hclge_set_rss_input_tuple(hdev);
4537         if (ret)
4538                 return ret;
4539
4540         /* Each TC has the same queue size, and the tc_size set to hardware is
4541          * the log2 of the roundup power of two of rss_size; the actual queue
4542          * size is limited by the indirection table.
4543          */
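        /* For example (hypothetical value): with rss_size = 24,
         * roundup_pow_of_two(24) = 32 and ilog2(32) = 5, so tc_size is
         * programmed as 5 while the indirection table still limits the
         * queues actually used to 24.
         */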
4544         if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4545                 dev_err(&hdev->pdev->dev,
4546                         "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4547                         rss_size);
4548                 return -EINVAL;
4549         }
4550
4551         roundup_size = roundup_pow_of_two(rss_size);
4552         roundup_size = ilog2(roundup_size);
4553
4554         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4555                 tc_valid[i] = 0;
4556
4557                 if (!(hdev->hw_tc_map & BIT(i)))
4558                         continue;
4559
4560                 tc_valid[i] = 1;
4561                 tc_size[i] = roundup_size;
4562                 tc_offset[i] = rss_size * i;
4563         }
4564
4565         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4566 }
4567
4568 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4569 {
4570         struct hclge_vport *vport = hdev->vport;
4571         int i, j;
4572
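        /* Default mapping: entry i points at queue (i % alloc_rss_size).
         * For example (hypothetical value), with alloc_rss_size = 4 the
         * table repeats the pattern 0, 1, 2, 3, ...
         */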
4573         for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4574                 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4575                         vport[j].rss_indirection_tbl[i] =
4576                                 i % vport[j].alloc_rss_size;
4577         }
4578 }
4579
4580 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4581 {
4582         int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4583         struct hclge_vport *vport = hdev->vport;
4584
4585         if (hdev->pdev->revision >= 0x21)
4586                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4587
4588         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4589                 vport[i].rss_tuple_sets.ipv4_tcp_en =
4590                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4591                 vport[i].rss_tuple_sets.ipv4_udp_en =
4592                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4593                 vport[i].rss_tuple_sets.ipv4_sctp_en =
4594                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4595                 vport[i].rss_tuple_sets.ipv4_fragment_en =
4596                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4597                 vport[i].rss_tuple_sets.ipv6_tcp_en =
4598                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4599                 vport[i].rss_tuple_sets.ipv6_udp_en =
4600                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4601                 vport[i].rss_tuple_sets.ipv6_sctp_en =
4602                         HCLGE_RSS_INPUT_TUPLE_SCTP;
4603                 vport[i].rss_tuple_sets.ipv6_fragment_en =
4604                         HCLGE_RSS_INPUT_TUPLE_OTHER;
4605
4606                 vport[i].rss_algo = rss_algo;
4607
4608                 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4609                        HCLGE_RSS_KEY_SIZE);
4610         }
4611
4612         hclge_rss_indir_init_cfg(hdev);
4613 }
4614
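/* Map (en = true) or unmap (en = false) every ring in ring_chain to the
 * given interrupt vector. Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries
 * are packed into one command descriptor before it is sent; any remainder
 * is flushed after the loop.
 */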
4615 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4616                                 int vector_id, bool en,
4617                                 struct hnae3_ring_chain_node *ring_chain)
4618 {
4619         struct hclge_dev *hdev = vport->back;
4620         struct hnae3_ring_chain_node *node;
4621         struct hclge_desc desc;
4622         struct hclge_ctrl_vector_chain_cmd *req =
4623                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4624         enum hclge_cmd_status status;
4625         enum hclge_opcode_type op;
4626         u16 tqp_type_and_id;
4627         int i;
4628
4629         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4630         hclge_cmd_setup_basic_desc(&desc, op, false);
4631         req->int_vector_id = vector_id;
4632
4633         i = 0;
4634         for (node = ring_chain; node; node = node->next) {
4635                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4636                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
4637                                 HCLGE_INT_TYPE_S,
4638                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4639                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4640                                 HCLGE_TQP_ID_S, node->tqp_index);
4641                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4642                                 HCLGE_INT_GL_IDX_S,
4643                                 hnae3_get_field(node->int_gl_idx,
4644                                                 HNAE3_RING_GL_IDX_M,
4645                                                 HNAE3_RING_GL_IDX_S));
4646                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4647                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4648                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4649                         req->vfid = vport->vport_id;
4650
4651                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4652                         if (status) {
4653                                 dev_err(&hdev->pdev->dev,
4654                                         "Map TQP fail, status is %d.\n",
4655                                         status);
4656                                 return -EIO;
4657                         }
4658                         i = 0;
4659
4660                         hclge_cmd_setup_basic_desc(&desc,
4661                                                    op,
4662                                                    false);
4663                         req->int_vector_id = vector_id;
4664                 }
4665         }
4666
4667         if (i > 0) {
4668                 req->int_cause_num = i;
4669                 req->vfid = vport->vport_id;
4670                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4671                 if (status) {
4672                         dev_err(&hdev->pdev->dev,
4673                                 "Map TQP fail, status is %d.\n", status);
4674                         return -EIO;
4675                 }
4676         }
4677
4678         return 0;
4679 }
4680
4681 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4682                                     struct hnae3_ring_chain_node *ring_chain)
4683 {
4684         struct hclge_vport *vport = hclge_get_vport(handle);
4685         struct hclge_dev *hdev = vport->back;
4686         int vector_id;
4687
4688         vector_id = hclge_get_vector_index(hdev, vector);
4689         if (vector_id < 0) {
4690                 dev_err(&hdev->pdev->dev,
4691                         "Get vector index fail. vector_id =%d\n", vector_id);
4692                 return vector_id;
4693         }
4694
4695         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4696 }
4697
4698 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4699                                        struct hnae3_ring_chain_node *ring_chain)
4700 {
4701         struct hclge_vport *vport = hclge_get_vport(handle);
4702         struct hclge_dev *hdev = vport->back;
4703         int vector_id, ret;
4704
4705         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4706                 return 0;
4707
4708         vector_id = hclge_get_vector_index(hdev, vector);
4709         if (vector_id < 0) {
4710                 dev_err(&handle->pdev->dev,
4711                         "Get vector index fail. ret =%d\n", vector_id);
4712                 return vector_id;
4713         }
4714
4715         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4716         if (ret)
4717                 dev_err(&handle->pdev->dev,
4718                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4719                         vector_id, ret);
4720
4721         return ret;
4722 }
4723
4724 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4725                                       struct hclge_promisc_param *param)
4726 {
4727         struct hclge_promisc_cfg_cmd *req;
4728         struct hclge_desc desc;
4729         int ret;
4730
4731         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4732
4733         req = (struct hclge_promisc_cfg_cmd *)desc.data;
4734         req->vf_id = param->vf_id;
4735
4736         /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4737          * pdev revision 0x20; newer revisions support them. Setting
4738          * these two fields does not cause an error when the driver
4739          * sends the command to the firmware on revision 0x20.
4740          */
4741         req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4742                 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4743
4744         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4745         if (ret)
4746                 dev_err(&hdev->pdev->dev,
4747                         "Set promisc mode fail, status is %d.\n", ret);
4748
4749         return ret;
4750 }
4751
4752 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4753                                      bool en_uc, bool en_mc, bool en_bc,
4754                                      int vport_id)
4755 {
4756         if (!param)
4757                 return;
4758
4759         memset(param, 0, sizeof(struct hclge_promisc_param));
4760         if (en_uc)
4761                 param->enable = HCLGE_PROMISC_EN_UC;
4762         if (en_mc)
4763                 param->enable |= HCLGE_PROMISC_EN_MC;
4764         if (en_bc)
4765                 param->enable |= HCLGE_PROMISC_EN_BC;
4766         param->vf_id = vport_id;
4767 }
4768
4769 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4770                                  bool en_mc_pmc, bool en_bc_pmc)
4771 {
4772         struct hclge_dev *hdev = vport->back;
4773         struct hclge_promisc_param param;
4774
4775         hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4776                                  vport->vport_id);
4777         return hclge_cmd_set_promisc_mode(hdev, &param);
4778 }
4779
4780 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4781                                   bool en_mc_pmc)
4782 {
4783         struct hclge_vport *vport = hclge_get_vport(handle);
4784         bool en_bc_pmc = true;
4785
4786         /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4787          * is always bypassed. So broadcast promisc should be disabled until
4788          * the user enables promisc mode.
4789          */
4790         if (handle->pdev->revision == 0x20)
4791                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4792
4793         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4794                                             en_bc_pmc);
4795 }
4796
4797 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4798 {
4799         struct hclge_get_fd_mode_cmd *req;
4800         struct hclge_desc desc;
4801         int ret;
4802
4803         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4804
4805         req = (struct hclge_get_fd_mode_cmd *)desc.data;
4806
4807         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4808         if (ret) {
4809                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4810                 return ret;
4811         }
4812
4813         *fd_mode = req->mode;
4814
4815         return ret;
4816 }
4817
4818 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4819                                    u32 *stage1_entry_num,
4820                                    u32 *stage2_entry_num,
4821                                    u16 *stage1_counter_num,
4822                                    u16 *stage2_counter_num)
4823 {
4824         struct hclge_get_fd_allocation_cmd *req;
4825         struct hclge_desc desc;
4826         int ret;
4827
4828         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4829
4830         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4831
4832         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4833         if (ret) {
4834                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4835                         ret);
4836                 return ret;
4837         }
4838
4839         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4840         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4841         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4842         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4843
4844         return ret;
4845 }
4846
4847 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4848 {
4849         struct hclge_set_fd_key_config_cmd *req;
4850         struct hclge_fd_key_cfg *stage;
4851         struct hclge_desc desc;
4852         int ret;
4853
4854         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4855
4856         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4857         stage = &hdev->fd_cfg.key_cfg[stage_num];
4858         req->stage = stage_num;
4859         req->key_select = stage->key_sel;
4860         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4861         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4862         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4863         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4864         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4865         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4866
4867         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4868         if (ret)
4869                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
4870
4871         return ret;
4872 }
4873
4874 static int hclge_init_fd_config(struct hclge_dev *hdev)
4875 {
4876 #define LOW_2_WORDS             0x03
4877         struct hclge_fd_key_cfg *key_cfg;
4878         int ret;
4879
4880         if (!hnae3_dev_fd_supported(hdev))
4881                 return 0;
4882
4883         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4884         if (ret)
4885                 return ret;
4886
4887         switch (hdev->fd_cfg.fd_mode) {
4888         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4889                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4890                 break;
4891         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4892                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4893                 break;
4894         default:
4895                 dev_err(&hdev->pdev->dev,
4896                         "Unsupported flow director mode %u\n",
4897                         hdev->fd_cfg.fd_mode);
4898                 return -EOPNOTSUPP;
4899         }
4900
4901         hdev->fd_cfg.proto_support =
4902                 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4903                 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4904         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4905         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4906         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4907         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4908         key_cfg->outer_sipv6_word_en = 0;
4909         key_cfg->outer_dipv6_word_en = 0;
4910
4911         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4912                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4913                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4914                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4915
4916         /* If the max 400-bit key is used, tuples for ether type can be supported */
4917         if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4918                 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4919                 key_cfg->tuple_active |=
4920                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4921         }
4922
4923         /* roce_type is used to filter roce frames
4924          * dst_vport is used to specify the rule
4925          */
4926         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4927
4928         ret = hclge_get_fd_allocation(hdev,
4929                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4930                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4931                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4932                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4933         if (ret)
4934                 return ret;
4935
4936         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
4937 }
4938
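/* Program one flow director TCAM entry at 'loc'. Three command descriptors
 * are chained with HCLGE_CMD_FLAG_NEXT and together carry the key bytes;
 * sel_x selects whether the x or y half of the key pair is written.
 */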
4939 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4940                                 int loc, u8 *key, bool is_add)
4941 {
4942         struct hclge_fd_tcam_config_1_cmd *req1;
4943         struct hclge_fd_tcam_config_2_cmd *req2;
4944         struct hclge_fd_tcam_config_3_cmd *req3;
4945         struct hclge_desc desc[3];
4946         int ret;
4947
4948         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4949         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4950         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4951         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4952         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4953
4954         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4955         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4956         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4957
4958         req1->stage = stage;
4959         req1->xy_sel = sel_x ? 1 : 0;
4960         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4961         req1->index = cpu_to_le32(loc);
4962         req1->entry_vld = sel_x ? is_add : 0;
4963
4964         if (key) {
4965                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4966                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4967                        sizeof(req2->tcam_data));
4968                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4969                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4970         }
4971
4972         ret = hclge_cmd_send(&hdev->hw, desc, 3);
4973         if (ret)
4974                 dev_err(&hdev->pdev->dev,
4975                         "config tcam key fail, ret=%d\n",
4976                         ret);
4977
4978         return ret;
4979 }
4980
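/* Build the 64-bit action data (drop or direct queue, optional counter and
 * rule id write-back) for the flow director entry at 'loc' and write it
 * with the HCLGE_OPC_FD_AD_OP command.
 */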
4981 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4982                               struct hclge_fd_ad_data *action)
4983 {
4984         struct hclge_fd_ad_config_cmd *req;
4985         struct hclge_desc desc;
4986         u64 ad_data = 0;
4987         int ret;
4988
4989         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4990
4991         req = (struct hclge_fd_ad_config_cmd *)desc.data;
4992         req->index = cpu_to_le32(loc);
4993         req->stage = stage;
4994
4995         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4996                       action->write_rule_id_to_bd);
4997         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4998                         action->rule_id);
4999         ad_data <<= 32;
5000         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5001         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5002                       action->forward_to_direct_queue);
5003         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5004                         action->queue_id);
5005         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5006         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5007                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5008         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5009         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5010                         action->counter_id);
5011
5012         req->ad_data = cpu_to_le64(ad_data);
5013         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5014         if (ret)
5015                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5016
5017         return ret;
5018 }
5019
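/* Convert one tuple of a flow director rule (value plus mask) into the
 * TCAM key pair at the current key_x/key_y position. Returns true when the
 * tuple occupies space in the key (filled from the rule, or left zeroed if
 * the tuple is unused), so the caller advances its key cursor; returns
 * false when the tuple bit is not part of this key format.
 */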
5020 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5021                                    struct hclge_fd_rule *rule)
5022 {
5023         u16 tmp_x_s, tmp_y_s;
5024         u32 tmp_x_l, tmp_y_l;
5025         int i;
5026
5027         if (rule->unused_tuple & tuple_bit)
5028                 return true;
5029
5030         switch (tuple_bit) {
5031         case 0:
5032                 return false;
5033         case BIT(INNER_DST_MAC):
5034                 for (i = 0; i < ETH_ALEN; i++) {
5035                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5036                                rule->tuples_mask.dst_mac[i]);
5037                         calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5038                                rule->tuples_mask.dst_mac[i]);
5039                 }
5040
5041                 return true;
5042         case BIT(INNER_SRC_MAC):
5043                 for (i = 0; i < ETH_ALEN; i++) {
5044                         calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5045                                rule->tuples.src_mac[i]);
5046                                rule->tuples_mask.src_mac[i]);
5047                                rule->tuples.src_mac[i]);
5048                                rule->tuples_mask.src_mac[i]);
5049
5050                 return true;
5051         case BIT(INNER_VLAN_TAG_FST):
5052                 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5053                        rule->tuples_mask.vlan_tag1);
5054                 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5055                        rule->tuples_mask.vlan_tag1);
5056                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5057                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5058
5059                 return true;
5060         case BIT(INNER_ETH_TYPE):
5061                 calc_x(tmp_x_s, rule->tuples.ether_proto,
5062                        rule->tuples_mask.ether_proto);
5063                 calc_y(tmp_y_s, rule->tuples.ether_proto,
5064                        rule->tuples_mask.ether_proto);
5065                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5066                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5067
5068                 return true;
5069         case BIT(INNER_IP_TOS):
5070                 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5071                 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5072
5073                 return true;
5074         case BIT(INNER_IP_PROTO):
5075                 calc_x(*key_x, rule->tuples.ip_proto,
5076                        rule->tuples_mask.ip_proto);
5077                 calc_y(*key_y, rule->tuples.ip_proto,
5078                        rule->tuples_mask.ip_proto);
5079
5080                 return true;
5081         case BIT(INNER_SRC_IP):
5082                 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5083                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5084                 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5085                        rule->tuples_mask.src_ip[IPV4_INDEX]);
5086                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5087                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5088
5089                 return true;
5090         case BIT(INNER_DST_IP):
5091                 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5092                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5093                 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5094                        rule->tuples_mask.dst_ip[IPV4_INDEX]);
5095                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5096                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5097
5098                 return true;
5099         case BIT(INNER_SRC_PORT):
5100                 calc_x(tmp_x_s, rule->tuples.src_port,
5101                        rule->tuples_mask.src_port);
5102                 calc_y(tmp_y_s, rule->tuples.src_port,
5103                        rule->tuples_mask.src_port);
5104                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5105                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5106
5107                 return true;
5108         case BIT(INNER_DST_PORT):
5109                 calc_x(tmp_x_s, rule->tuples.dst_port,
5110                        rule->tuples_mask.dst_port);
5111                 calc_y(tmp_y_s, rule->tuples.dst_port,
5112                        rule->tuples_mask.dst_port);
5113                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5114                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5115
5116                 return true;
5117         default:
5118                 return false;
5119         }
5120 }
5121
5122 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5123                                  u8 vf_id, u8 network_port_id)
5124 {
5125         u32 port_number = 0;
5126
5127         if (port_type == HOST_PORT) {
5128                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5129                                 pf_id);
5130                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5131                                 vf_id);
5132                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5133         } else {
5134                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5135                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5136                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5137         }
5138
5139         return port_number;
5140 }
5141
5142 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5143                                        __le32 *key_x, __le32 *key_y,
5144                                        struct hclge_fd_rule *rule)
5145 {
5146         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5147         u8 cur_pos = 0, tuple_size, shift_bits;
5148         unsigned int i;
5149
5150         for (i = 0; i < MAX_META_DATA; i++) {
5151                 tuple_size = meta_data_key_info[i].key_length;
5152                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5153
5154                 switch (tuple_bit) {
5155                 case BIT(ROCE_TYPE):
5156                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5157                         cur_pos += tuple_size;
5158                         break;
5159                 case BIT(DST_VPORT):
5160                         port_number = hclge_get_port_number(HOST_PORT, 0,
5161                                                             rule->vf_id, 0);
5162                         hnae3_set_field(meta_data,
5163                                         GENMASK(cur_pos + tuple_size - 1, cur_pos),
5164                                         cur_pos, port_number);
5165                         cur_pos += tuple_size;
5166                         break;
5167                 default:
5168                         break;
5169                 }
5170         }
5171
5172         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5173         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5174         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5175
5176         *key_x = cpu_to_le32(tmp_x << shift_bits);
5177         *key_y = cpu_to_le32(tmp_y << shift_bits);
5178 }
5179
5180 /* A complete key is combined from a meta data key and a tuple key.
5181  * The meta data key is stored at the MSB region, and the tuple key is
5182  * stored at the LSB region; unused bits are filled with 0.
5183  */
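/* Illustration (assuming the 400-bit key mode and a 32-bit meta data
 * region): max_key_length / 8 = 50 key bytes and meta_data_region = 46, so
 * key bytes 0..45 hold the tuple fields and bytes 46..49 hold the meta
 * data word built by hclge_fd_convert_meta_data().
 */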
5184 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5185                             struct hclge_fd_rule *rule)
5186 {
5187         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5188         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5189         u8 *cur_key_x, *cur_key_y;
5190         unsigned int i;
5191         int ret, tuple_size;
5192         u8 meta_data_region;
5193
5194         memset(key_x, 0, sizeof(key_x));
5195         memset(key_y, 0, sizeof(key_y));
5196         cur_key_x = key_x;
5197         cur_key_y = key_y;
5198
5199         for (i = 0; i < MAX_TUPLE; i++) {
5200                 bool tuple_valid;
5201                 u32 check_tuple;
5202
5203                 tuple_size = tuple_key_info[i].key_length / 8;
5204                 check_tuple = key_cfg->tuple_active & BIT(i);
5205
5206                 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5207                                                      cur_key_y, rule);
5208                 if (tuple_valid) {
5209                         cur_key_x += tuple_size;
5210                         cur_key_y += tuple_size;
5211                 }
5212         }
5213
5214         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5215                         MAX_META_DATA_LENGTH / 8;
5216
5217         hclge_fd_convert_meta_data(key_cfg,
5218                                    (__le32 *)(key_x + meta_data_region),
5219                                    (__le32 *)(key_y + meta_data_region),
5220                                    rule);
5221
5222         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5223                                    true);
5224         if (ret) {
5225                 dev_err(&hdev->pdev->dev,
5226                         "fd key_y config fail, loc=%u, ret=%d\n",
5227                         rule->location, ret);
5228                 return ret;
5229         }
5230
5231         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5232                                    true);
5233         if (ret)
5234                 dev_err(&hdev->pdev->dev,
5235                         "fd key_x config fail, loc=%u, ret=%d\n",
5236                         rule->location, ret);
5237         return ret;
5238 }
5239
5240 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5241                                struct hclge_fd_rule *rule)
5242 {
5243         struct hclge_fd_ad_data ad_data;
5244
5245         ad_data.ad_id = rule->location;
5246
5247         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5248                 ad_data.drop_packet = true;
5249                 ad_data.forward_to_direct_queue = false;
5250                 ad_data.queue_id = 0;
5251         } else {
5252                 ad_data.drop_packet = false;
5253                 ad_data.forward_to_direct_queue = true;
5254                 ad_data.queue_id = rule->queue_id;
5255         }
5256
5257         ad_data.use_counter = false;
5258         ad_data.counter_id = 0;
5259
5260         ad_data.use_next_stage = false;
5261         ad_data.next_input_key = 0;
5262
5263         ad_data.write_rule_id_to_bd = true;
5264         ad_data.rule_id = rule->location;
5265
5266         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5267 }
5268
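/* Validate an ethtool flow spec against the flow director capabilities and
 * record in *unused the tuple bits that the spec leaves unspecified, so
 * they can be skipped when the rule key is built.
 */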
5269 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5270                                struct ethtool_rx_flow_spec *fs, u32 *unused)
5271 {
5272         struct ethtool_tcpip4_spec *tcp_ip4_spec;
5273         struct ethtool_usrip4_spec *usr_ip4_spec;
5274         struct ethtool_tcpip6_spec *tcp_ip6_spec;
5275         struct ethtool_usrip6_spec *usr_ip6_spec;
5276         struct ethhdr *ether_spec;
5277
5278         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5279                 return -EINVAL;
5280
5281         if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5282                 return -EOPNOTSUPP;
5283
5284         if ((fs->flow_type & FLOW_EXT) &&
5285             (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5286                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5287                 return -EOPNOTSUPP;
5288         }
5289
5290         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5291         case SCTP_V4_FLOW:
5292         case TCP_V4_FLOW:
5293         case UDP_V4_FLOW:
5294                 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5295                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5296
5297                 if (!tcp_ip4_spec->ip4src)
5298                         *unused |= BIT(INNER_SRC_IP);
5299
5300                 if (!tcp_ip4_spec->ip4dst)
5301                         *unused |= BIT(INNER_DST_IP);
5302
5303                 if (!tcp_ip4_spec->psrc)
5304                         *unused |= BIT(INNER_SRC_PORT);
5305
5306                 if (!tcp_ip4_spec->pdst)
5307                         *unused |= BIT(INNER_DST_PORT);
5308
5309                 if (!tcp_ip4_spec->tos)
5310                         *unused |= BIT(INNER_IP_TOS);
5311
5312                 break;
5313         case IP_USER_FLOW:
5314                 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5315                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5316                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5317
5318                 if (!usr_ip4_spec->ip4src)
5319                         *unused |= BIT(INNER_SRC_IP);
5320
5321                 if (!usr_ip4_spec->ip4dst)
5322                         *unused |= BIT(INNER_DST_IP);
5323
5324                 if (!usr_ip4_spec->tos)
5325                         *unused |= BIT(INNER_IP_TOS);
5326
5327                 if (!usr_ip4_spec->proto)
5328                         *unused |= BIT(INNER_IP_PROTO);
5329
5330                 if (usr_ip4_spec->l4_4_bytes)
5331                         return -EOPNOTSUPP;
5332
5333                 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5334                         return -EOPNOTSUPP;
5335
5336                 break;
5337         case SCTP_V6_FLOW:
5338         case TCP_V6_FLOW:
5339         case UDP_V6_FLOW:
5340                 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5341                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5342                         BIT(INNER_IP_TOS);
5343
5344                 /* check whether src/dst ip address used */
5345                 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5346                     !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5347                         *unused |= BIT(INNER_SRC_IP);
5348
5349                 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5350                     !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5351                         *unused |= BIT(INNER_DST_IP);
5352
5353                 if (!tcp_ip6_spec->psrc)
5354                         *unused |= BIT(INNER_SRC_PORT);
5355
5356                 if (!tcp_ip6_spec->pdst)
5357                         *unused |= BIT(INNER_DST_PORT);
5358
5359                 if (tcp_ip6_spec->tclass)
5360                         return -EOPNOTSUPP;
5361
5362                 break;
5363         case IPV6_USER_FLOW:
5364                 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5365                 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5366                         BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5367                         BIT(INNER_DST_PORT);
5368
5369                 /* check whether src/dst ip address used */
5370                 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5371                     !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5372                         *unused |= BIT(INNER_SRC_IP);
5373
5374                 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5375                     !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5376                         *unused |= BIT(INNER_DST_IP);
5377
5378                 if (!usr_ip6_spec->l4_proto)
5379                         *unused |= BIT(INNER_IP_PROTO);
5380
5381                 if (usr_ip6_spec->tclass)
5382                         return -EOPNOTSUPP;
5383
5384                 if (usr_ip6_spec->l4_4_bytes)
5385                         return -EOPNOTSUPP;
5386
5387                 break;
5388         case ETHER_FLOW:
5389                 ether_spec = &fs->h_u.ether_spec;
5390                 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5391                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5392                         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5393
5394                 if (is_zero_ether_addr(ether_spec->h_source))
5395                         *unused |= BIT(INNER_SRC_MAC);
5396
5397                 if (is_zero_ether_addr(ether_spec->h_dest))
5398                         *unused |= BIT(INNER_DST_MAC);
5399
5400                 if (!ether_spec->h_proto)
5401                         *unused |= BIT(INNER_ETH_TYPE);
5402
5403                 break;
5404         default:
5405                 return -EOPNOTSUPP;
5406         }
5407
5408         if ((fs->flow_type & FLOW_EXT)) {
5409                 if (fs->h_ext.vlan_etype)
5410                         return -EOPNOTSUPP;
5411                 if (!fs->h_ext.vlan_tci)
5412                         *unused |= BIT(INNER_VLAN_TAG_FST);
5413
5414                 if (fs->m_ext.vlan_tci) {
5415                         if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5416                                 return -EINVAL;
5417                 }
5418         } else {
5419                 *unused |= BIT(INNER_VLAN_TAG_FST);
5420         }
5421
5422         if (fs->flow_type & FLOW_MAC_EXT) {
5423                 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5424                         return -EOPNOTSUPP;
5425
5426                 if (is_zero_ether_addr(fs->h_ext.h_dest))
5427                         *unused |= BIT(INNER_DST_MAC);
5428                 else
5429                         *unused &= ~(BIT(INNER_DST_MAC));
5430         }
5431
5432         return 0;
5433 }
5434
5435 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5436 {
5437         struct hclge_fd_rule *rule = NULL;
5438         struct hlist_node *node2;
5439
5440         spin_lock_bh(&hdev->fd_rule_lock);
5441         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5442                 if (rule->location >= location)
5443                         break;
5444         }
5445
5446         spin_unlock_bh(&hdev->fd_rule_lock);
5447
5448         return rule && rule->location == location;
5449 }
5450
5451 /* must be called with fd_rule_lock held */
5452 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5453                                      struct hclge_fd_rule *new_rule,
5454                                      u16 location,
5455                                      bool is_add)
5456 {
5457         struct hclge_fd_rule *rule = NULL, *parent = NULL;
5458         struct hlist_node *node2;
5459
5460         if (is_add && !new_rule)
5461                 return -EINVAL;
5462
5463         hlist_for_each_entry_safe(rule, node2,
5464                                   &hdev->fd_rule_list, rule_node) {
5465                 if (rule->location >= location)
5466                         break;
5467                 parent = rule;
5468         }
5469
5470         if (rule && rule->location == location) {
5471                 hlist_del(&rule->rule_node);
5472                 kfree(rule);
5473                 hdev->hclge_fd_rule_num--;
5474
5475                 if (!is_add) {
5476                         if (!hdev->hclge_fd_rule_num)
5477                                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5478                         clear_bit(location, hdev->fd_bmap);
5479
5480                         return 0;
5481                 }
5482         } else if (!is_add) {
5483                 dev_err(&hdev->pdev->dev,
5484                         "delete fail, rule %u does not exist\n",
5485                         location);
5486                 return -EINVAL;
5487         }
5488
5489         INIT_HLIST_NODE(&new_rule->rule_node);
5490
5491         if (parent)
5492                 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5493         else
5494                 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5495
5496         set_bit(location, hdev->fd_bmap);
5497         hdev->hclge_fd_rule_num++;
5498         hdev->fd_active_type = new_rule->rule_type;
5499
5500         return 0;
5501 }
5502
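/* Copy the fields of an ethtool flow spec, converted to CPU byte order,
 * into the driver's rule->tuples / rule->tuples_mask representation, and
 * derive the implicit fields (ether proto, ip proto) from the flow type.
 */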
5503 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5504                               struct ethtool_rx_flow_spec *fs,
5505                               struct hclge_fd_rule *rule)
5506 {
5507         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5508
5509         switch (flow_type) {
5510         case SCTP_V4_FLOW:
5511         case TCP_V4_FLOW:
5512         case UDP_V4_FLOW:
5513                 rule->tuples.src_ip[IPV4_INDEX] =
5514                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5515                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5516                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5517
5518                 rule->tuples.dst_ip[IPV4_INDEX] =
5519                                 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5520                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5521                                 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5522
5523                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5524                 rule->tuples_mask.src_port =
5525                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5526
5527                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5528                 rule->tuples_mask.dst_port =
5529                                 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5530
5531                 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5532                 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5533
5534                 rule->tuples.ether_proto = ETH_P_IP;
5535                 rule->tuples_mask.ether_proto = 0xFFFF;
5536
5537                 break;
5538         case IP_USER_FLOW:
5539                 rule->tuples.src_ip[IPV4_INDEX] =
5540                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5541                 rule->tuples_mask.src_ip[IPV4_INDEX] =
5542                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5543
5544                 rule->tuples.dst_ip[IPV4_INDEX] =
5545                                 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5546                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5547                                 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5548
5549                 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5550                 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5551
5552                 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5553                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5554
5555                 rule->tuples.ether_proto = ETH_P_IP;
5556                 rule->tuples_mask.ether_proto = 0xFFFF;
5557
5558                 break;
5559         case SCTP_V6_FLOW:
5560         case TCP_V6_FLOW:
5561         case UDP_V6_FLOW:
5562                 be32_to_cpu_array(rule->tuples.src_ip,
5563                                   fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5564                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5565                                   fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5566
5567                 be32_to_cpu_array(rule->tuples.dst_ip,
5568                                   fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5569                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5570                                   fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5571
5572                 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5573                 rule->tuples_mask.src_port =
5574                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5575
5576                 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5577                 rule->tuples_mask.dst_port =
5578                                 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5579
5580                 rule->tuples.ether_proto = ETH_P_IPV6;
5581                 rule->tuples_mask.ether_proto = 0xFFFF;
5582
5583                 break;
5584         case IPV6_USER_FLOW:
5585                 be32_to_cpu_array(rule->tuples.src_ip,
5586                                   fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5587                 be32_to_cpu_array(rule->tuples_mask.src_ip,
5588                                   fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5589
5590                 be32_to_cpu_array(rule->tuples.dst_ip,
5591                                   fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5592                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5593                                   fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5594
5595                 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5596                 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5597
5598                 rule->tuples.ether_proto = ETH_P_IPV6;
5599                 rule->tuples_mask.ether_proto = 0xFFFF;
5600
5601                 break;
5602         case ETHER_FLOW:
5603                 ether_addr_copy(rule->tuples.src_mac,
5604                                 fs->h_u.ether_spec.h_source);
5605                 ether_addr_copy(rule->tuples_mask.src_mac,
5606                                 fs->m_u.ether_spec.h_source);
5607
5608                 ether_addr_copy(rule->tuples.dst_mac,
5609                                 fs->h_u.ether_spec.h_dest);
5610                 ether_addr_copy(rule->tuples_mask.dst_mac,
5611                                 fs->m_u.ether_spec.h_dest);
5612
5613                 rule->tuples.ether_proto =
5614                                 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5615                 rule->tuples_mask.ether_proto =
5616                                 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5617
5618                 break;
5619         default:
5620                 return -EOPNOTSUPP;
5621         }
5622
5623         switch (flow_type) {
5624         case SCTP_V4_FLOW:
5625         case SCTP_V6_FLOW:
5626                 rule->tuples.ip_proto = IPPROTO_SCTP;
5627                 rule->tuples_mask.ip_proto = 0xFF;
5628                 break;
5629         case TCP_V4_FLOW:
5630         case TCP_V6_FLOW:
5631                 rule->tuples.ip_proto = IPPROTO_TCP;
5632                 rule->tuples_mask.ip_proto = 0xFF;
5633                 break;
5634         case UDP_V4_FLOW:
5635         case UDP_V6_FLOW:
5636                 rule->tuples.ip_proto = IPPROTO_UDP;
5637                 rule->tuples_mask.ip_proto = 0xFF;
5638                 break;
5639         default:
5640                 break;
5641         }
5642
5643         if (fs->flow_type & FLOW_EXT) {
5644                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5645                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5646         }
5647
5648         if (fs->flow_type & FLOW_MAC_EXT) {
5649                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5650                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5651         }
5652
5653         return 0;
5654 }
5655
5656 /* the caller must hold the fd_rule_lock before calling this function */
5657 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5658                                 struct hclge_fd_rule *rule)
5659 {
5660         int ret;
5661
5662         if (!rule) {
5663                 dev_err(&hdev->pdev->dev,
5664                         "The flow director rule is NULL\n");
5665                 return -EINVAL;
5666         }
5667
5668         /* it never fails here, so there is no need to check the return value */
5669         hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5670
5671         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5672         if (ret)
5673                 goto clear_rule;
5674
5675         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5676         if (ret)
5677                 goto clear_rule;
5678
5679         return 0;
5680
5681 clear_rule:
5682         hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5683         return ret;
5684 }
5685
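/* add a flow director rule from an ethtool flow spec: validate the spec,
 * resolve the drop action or destination vport/queue, clear any arfs rules,
 * then program the rule into hardware under fd_rule_lock
 */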
5686 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5687                               struct ethtool_rxnfc *cmd)
5688 {
5689         struct hclge_vport *vport = hclge_get_vport(handle);
5690         struct hclge_dev *hdev = vport->back;
5691         u16 dst_vport_id = 0, q_index = 0;
5692         struct ethtool_rx_flow_spec *fs;
5693         struct hclge_fd_rule *rule;
5694         u32 unused = 0;
5695         u8 action;
5696         int ret;
5697
5698         if (!hnae3_dev_fd_supported(hdev))
5699                 return -EOPNOTSUPP;
5700
5701         if (!hdev->fd_en) {
5702                 dev_warn(&hdev->pdev->dev,
5703                          "Please enable flow director first\n");
5704                 return -EOPNOTSUPP;
5705         }
5706
5707         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5708
5709         ret = hclge_fd_check_spec(hdev, fs, &unused);
5710         if (ret) {
5711                 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5712                 return ret;
5713         }
5714
5715         if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5716                 action = HCLGE_FD_ACTION_DROP_PACKET;
5717         } else {
5718                 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5719                 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5720                 u16 tqps;
5721
5722                 if (vf > hdev->num_req_vfs) {
5723                         dev_err(&hdev->pdev->dev,
5724                                 "Error: vf id (%u) > max vf num (%u)\n",
5725                                 vf, hdev->num_req_vfs);
5726                         return -EINVAL;
5727                 }
5728
5729                 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5730                 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5731
5732                 if (ring >= tqps) {
5733                         dev_err(&hdev->pdev->dev,
5734                                 "Error: queue id (%u) > max tqp num (%u)\n",
5735                                 ring, tqps - 1);
5736                         return -EINVAL;
5737                 }
5738
5739                 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5740                 q_index = ring;
5741         }
5742
5743         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5744         if (!rule)
5745                 return -ENOMEM;
5746
5747         ret = hclge_fd_get_tuple(hdev, fs, rule);
5748         if (ret) {
5749                 kfree(rule);
5750                 return ret;
5751         }
5752
5753         rule->flow_type = fs->flow_type;
5754
5755         rule->location = fs->location;
5756         rule->unused_tuple = unused;
5757         rule->vf_id = dst_vport_id;
5758         rule->queue_id = q_index;
5759         rule->action = action;
5760         rule->rule_type = HCLGE_FD_EP_ACTIVE;
5761
5762         /* when the user configures a rule via ethtool, we need to clear
5763          * all arfs rules to avoid rule conflicts
5764          */
5765         hclge_clear_arfs_rules(handle);
5766
5767         spin_lock_bh(&hdev->fd_rule_lock);
5768         ret = hclge_fd_config_rule(hdev, rule);
5769
5770         spin_unlock_bh(&hdev->fd_rule_lock);
5771
5772         return ret;
5773 }
5774
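/* delete the flow director rule at fs->location: clear its TCAM entry and
 * remove it from the software rule list
 */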
5775 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5776                               struct ethtool_rxnfc *cmd)
5777 {
5778         struct hclge_vport *vport = hclge_get_vport(handle);
5779         struct hclge_dev *hdev = vport->back;
5780         struct ethtool_rx_flow_spec *fs;
5781         int ret;
5782
5783         if (!hnae3_dev_fd_supported(hdev))
5784                 return -EOPNOTSUPP;
5785
5786         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5787
5788         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5789                 return -EINVAL;
5790
5791         if (!hclge_fd_rule_exist(hdev, fs->location)) {
5792                 dev_err(&hdev->pdev->dev,
5793                         "Delete fail, rule %u does not exist\n", fs->location);
5794                 return -ENOENT;
5795         }
5796
5797         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5798                                    NULL, false);
5799         if (ret)
5800                 return ret;
5801
5802         spin_lock_bh(&hdev->fd_rule_lock);
5803         ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5804
5805         spin_unlock_bh(&hdev->fd_rule_lock);
5806
5807         return ret;
5808 }
5809
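/* clear every flow director TCAM entry; when clear_list is true, also free
 * the software rule list and reset the rule counter, active type and bitmap
 */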
5810 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5811                                      bool clear_list)
5812 {
5813         struct hclge_vport *vport = hclge_get_vport(handle);
5814         struct hclge_dev *hdev = vport->back;
5815         struct hclge_fd_rule *rule;
5816         struct hlist_node *node;
5817         u16 location;
5818
5819         if (!hnae3_dev_fd_supported(hdev))
5820                 return;
5821
5822         spin_lock_bh(&hdev->fd_rule_lock);
5823         for_each_set_bit(location, hdev->fd_bmap,
5824                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5825                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5826                                      NULL, false);
5827
5828         if (clear_list) {
5829                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5830                                           rule_node) {
5831                         hlist_del(&rule->rule_node);
5832                         kfree(rule);
5833                 }
5834                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5835                 hdev->hclge_fd_rule_num = 0;
5836                 bitmap_zero(hdev->fd_bmap,
5837                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5838         }
5839
5840         spin_unlock_bh(&hdev->fd_rule_lock);
5841 }
5842
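/* re-program all flow director rules into hardware after a reset; rules
 * that fail to restore are dropped from the software list
 */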
5843 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5844 {
5845         struct hclge_vport *vport = hclge_get_vport(handle);
5846         struct hclge_dev *hdev = vport->back;
5847         struct hclge_fd_rule *rule;
5848         struct hlist_node *node;
5849         int ret;
5850
5851         /* Return ok here, because reset error handling will check this
5852          * return value. If an error is returned here, the reset process
5853          * will fail.
5854          */
5855         if (!hnae3_dev_fd_supported(hdev))
5856                 return 0;
5857
5858         /* if fd is disabled, the rules should not be restored during reset */
5859         if (!hdev->fd_en)
5860                 return 0;
5861
5862         spin_lock_bh(&hdev->fd_rule_lock);
5863         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5864                 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5865                 if (!ret)
5866                         ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5867
5868                 if (ret) {
5869                         dev_warn(&hdev->pdev->dev,
5870                                  "Restore rule %u failed, remove it\n",
5871                                  rule->location);
5872                         clear_bit(rule->location, hdev->fd_bmap);
5873                         hlist_del(&rule->rule_node);
5874                         kfree(rule);
5875                         hdev->hclge_fd_rule_num--;
5876                 }
5877         }
5878
5879         if (hdev->hclge_fd_rule_num)
5880                 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5881
5882         spin_unlock_bh(&hdev->fd_rule_lock);
5883
5884         return 0;
5885 }
5886
5887 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5888                                  struct ethtool_rxnfc *cmd)
5889 {
5890         struct hclge_vport *vport = hclge_get_vport(handle);
5891         struct hclge_dev *hdev = vport->back;
5892
5893         if (!hnae3_dev_fd_supported(hdev))
5894                 return -EOPNOTSUPP;
5895
5896         cmd->rule_cnt = hdev->hclge_fd_rule_num;
5897         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5898
5899         return 0;
5900 }
5901
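/* translate the stored rule at fs->location back into an
 * ethtool_rx_flow_spec, masking out the tuple fields marked as unused
 */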
5902 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5903                                   struct ethtool_rxnfc *cmd)
5904 {
5905         struct hclge_vport *vport = hclge_get_vport(handle);
5906         struct hclge_fd_rule *rule = NULL;
5907         struct hclge_dev *hdev = vport->back;
5908         struct ethtool_rx_flow_spec *fs;
5909         struct hlist_node *node2;
5910
5911         if (!hnae3_dev_fd_supported(hdev))
5912                 return -EOPNOTSUPP;
5913
5914         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5915
5916         spin_lock_bh(&hdev->fd_rule_lock);
5917
5918         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5919                 if (rule->location >= fs->location)
5920                         break;
5921         }
5922
5923         if (!rule || fs->location != rule->location) {
5924                 spin_unlock_bh(&hdev->fd_rule_lock);
5925
5926                 return -ENOENT;
5927         }
5928
5929         fs->flow_type = rule->flow_type;
5930         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5931         case SCTP_V4_FLOW:
5932         case TCP_V4_FLOW:
5933         case UDP_V4_FLOW:
5934                 fs->h_u.tcp_ip4_spec.ip4src =
5935                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5936                 fs->m_u.tcp_ip4_spec.ip4src =
5937                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5938                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5939
5940                 fs->h_u.tcp_ip4_spec.ip4dst =
5941                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5942                 fs->m_u.tcp_ip4_spec.ip4dst =
5943                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5944                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5945
5946                 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5947                 fs->m_u.tcp_ip4_spec.psrc =
5948                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5949                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
5950
5951                 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5952                 fs->m_u.tcp_ip4_spec.pdst =
5953                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5954                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5955
5956                 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5957                 fs->m_u.tcp_ip4_spec.tos =
5958                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5959                                 0 : rule->tuples_mask.ip_tos;
5960
5961                 break;
5962         case IP_USER_FLOW:
5963                 fs->h_u.usr_ip4_spec.ip4src =
5964                                 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5965                 fs->m_u.usr_ip4_spec.ip4src =
5966                         rule->unused_tuple & BIT(INNER_SRC_IP) ?
5967                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5968
5969                 fs->h_u.usr_ip4_spec.ip4dst =
5970                                 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5971                 fs->m_u.usr_ip4_spec.ip4dst =
5972                         rule->unused_tuple & BIT(INNER_DST_IP) ?
5973                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5974
5975                 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5976                 fs->m_u.usr_ip4_spec.tos =
5977                                 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5978                                 0 : rule->tuples_mask.ip_tos;
5979
5980                 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5981                 fs->m_u.usr_ip4_spec.proto =
5982                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5983                                 0 : rule->tuples_mask.ip_proto;
5984
5985                 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5986
5987                 break;
5988         case SCTP_V6_FLOW:
5989         case TCP_V6_FLOW:
5990         case UDP_V6_FLOW:
5991                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5992                                   rule->tuples.src_ip, IPV6_SIZE);
5993                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5994                         memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5995                                sizeof(int) * IPV6_SIZE);
5996                 else
5997                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5998                                           rule->tuples_mask.src_ip, IPV6_SIZE);
5999
6000                 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6001                                   rule->tuples.dst_ip, IPV6_SIZE);
6002                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6003                         memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6004                                sizeof(int) * IPV6_SIZE);
6005                 else
6006                         cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6007                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6008
6009                 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6010                 fs->m_u.tcp_ip6_spec.psrc =
6011                                 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6012                                 0 : cpu_to_be16(rule->tuples_mask.src_port);
6013
6014                 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6015                 fs->m_u.tcp_ip6_spec.pdst =
6016                                 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6017                                 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6018
6019                 break;
6020         case IPV6_USER_FLOW:
6021                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6022                                   rule->tuples.src_ip, IPV6_SIZE);
6023                 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6024                         memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6025                                sizeof(int) * IPV6_SIZE);
6026                 else
6027                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6028                                           rule->tuples_mask.src_ip, IPV6_SIZE);
6029
6030                 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6031                                   rule->tuples.dst_ip, IPV6_SIZE);
6032                 if (rule->unused_tuple & BIT(INNER_DST_IP))
6033                         memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6034                                sizeof(int) * IPV6_SIZE);
6035                 else
6036                         cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6037                                           rule->tuples_mask.dst_ip, IPV6_SIZE);
6038
6039                 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6040                 fs->m_u.usr_ip6_spec.l4_proto =
6041                                 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6042                                 0 : rule->tuples_mask.ip_proto;
6043
6044                 break;
6045         case ETHER_FLOW:
6046                 ether_addr_copy(fs->h_u.ether_spec.h_source,
6047                                 rule->tuples.src_mac);
6048                 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6049                         eth_zero_addr(fs->m_u.ether_spec.h_source);
6050                 else
6051                         ether_addr_copy(fs->m_u.ether_spec.h_source,
6052                                         rule->tuples_mask.src_mac);
6053
6054                 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6055                                 rule->tuples.dst_mac);
6056                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6057                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6058                 else
6059                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6060                                         rule->tuples_mask.dst_mac);
6061
6062                 fs->h_u.ether_spec.h_proto =
6063                                 cpu_to_be16(rule->tuples.ether_proto);
6064                 fs->m_u.ether_spec.h_proto =
6065                                 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6066                                 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6067
6068                 break;
6069         default:
6070                 spin_unlock_bh(&hdev->fd_rule_lock);
6071                 return -EOPNOTSUPP;
6072         }
6073
6074         if (fs->flow_type & FLOW_EXT) {
6075                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6076                 fs->m_ext.vlan_tci =
6077                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6078                                 cpu_to_be16(VLAN_VID_MASK) :
6079                                 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6080         }
6081
6082         if (fs->flow_type & FLOW_MAC_EXT) {
6083                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6084                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6085                         eth_zero_addr(fs->m_ext.h_dest);
6086                 else
6087                         ether_addr_copy(fs->m_ext.h_dest,
6088                                         rule->tuples_mask.dst_mac);
6089         }
6090
6091         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6092                 fs->ring_cookie = RX_CLS_FLOW_DISC;
6093         } else {
6094                 u64 vf_id;
6095
6096                 fs->ring_cookie = rule->queue_id;
6097                 vf_id = rule->vf_id;
6098                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6099                 fs->ring_cookie |= vf_id;
6100         }
6101
6102         spin_unlock_bh(&hdev->fd_rule_lock);
6103
6104         return 0;
6105 }
6106
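/* report the locations of all configured flow director rules via rule_locs;
 * returns -EMSGSIZE if there are more rules than cmd->rule_cnt allows
 */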
6107 static int hclge_get_all_rules(struct hnae3_handle *handle,
6108                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
6109 {
6110         struct hclge_vport *vport = hclge_get_vport(handle);
6111         struct hclge_dev *hdev = vport->back;
6112         struct hclge_fd_rule *rule;
6113         struct hlist_node *node2;
6114         int cnt = 0;
6115
6116         if (!hnae3_dev_fd_supported(hdev))
6117                 return -EOPNOTSUPP;
6118
6119         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6120
6121         spin_lock_bh(&hdev->fd_rule_lock);
6122         hlist_for_each_entry_safe(rule, node2,
6123                                   &hdev->fd_rule_list, rule_node) {
6124                 if (cnt == cmd->rule_cnt) {
6125                         spin_unlock_bh(&hdev->fd_rule_lock);
6126                         return -EMSGSIZE;
6127                 }
6128
6129                 rule_locs[cnt] = rule->location;
6130                 cnt++;
6131         }
6132
6133         spin_unlock_bh(&hdev->fd_rule_lock);
6134
6135         cmd->rule_cnt = cnt;
6136
6137         return 0;
6138 }
6139
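/* extract the tuple fields used by arfs rules from the parsed flow keys */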
6140 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6141                                      struct hclge_fd_rule_tuples *tuples)
6142 {
6143         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6144         tuples->ip_proto = fkeys->basic.ip_proto;
6145         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6146
6147         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6148                 tuples->src_ip[IPV4_INDEX] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6149                 tuples->dst_ip[IPV4_INDEX] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6150         } else {
6151                 memcpy(tuples->src_ip,
6152                        fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6153                        sizeof(tuples->src_ip));
6154                 memcpy(tuples->dst_ip,
6155                        fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6156                        sizeof(tuples->dst_ip));
6157         }
6158 }
6159
6160 /* traverse all rules, check whether an existing rule has the same tuples */
6161 static struct hclge_fd_rule *
6162 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6163                           const struct hclge_fd_rule_tuples *tuples)
6164 {
6165         struct hclge_fd_rule *rule = NULL;
6166         struct hlist_node *node;
6167
6168         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6169                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6170                         return rule;
6171         }
6172
6173         return NULL;
6174 }
6175
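/* build a flow director rule for arfs: mark the tuple fields arfs does not
 * match on as unused, derive the flow type from the IP version and L4
 * protocol, and use a full mask for the remaining tuples
 */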
6176 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6177                                      struct hclge_fd_rule *rule)
6178 {
6179         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6180                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6181                              BIT(INNER_SRC_PORT);
6182         rule->action = 0;
6183         rule->vf_id = 0;
6184         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6185         if (tuples->ether_proto == ETH_P_IP) {
6186                 if (tuples->ip_proto == IPPROTO_TCP)
6187                         rule->flow_type = TCP_V4_FLOW;
6188                 else
6189                         rule->flow_type = UDP_V4_FLOW;
6190         } else {
6191                 if (tuples->ip_proto == IPPROTO_TCP)
6192                         rule->flow_type = TCP_V6_FLOW;
6193                 else
6194                         rule->flow_type = UDP_V6_FLOW;
6195         }
6196         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6197         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6198 }
6199
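/* arfs flow steering entry: reuse or retarget an existing rule with the
 * same tuples, or allocate a free location and program a new rule
 */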
6200 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6201                                       u16 flow_id, struct flow_keys *fkeys)
6202 {
6203         struct hclge_vport *vport = hclge_get_vport(handle);
6204         struct hclge_fd_rule_tuples new_tuples;
6205         struct hclge_dev *hdev = vport->back;
6206         struct hclge_fd_rule *rule;
6207         u16 tmp_queue_id;
6208         u16 bit_id;
6209         int ret;
6210
6211         if (!hnae3_dev_fd_supported(hdev))
6212                 return -EOPNOTSUPP;
6213
6214         memset(&new_tuples, 0, sizeof(new_tuples));
6215         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6216
6217         spin_lock_bh(&hdev->fd_rule_lock);
6218
6219         /* when a flow director rule added by the user already exists,
6220          * arfs should not be used
6221          */
6222         if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6223                 spin_unlock_bh(&hdev->fd_rule_lock);
6224
6225                 return -EOPNOTSUPP;
6226         }
6227
6228         /* check whether a flow director filter already exists for this flow;
6229          * if not, create a new filter for it;
6230          * if a filter exists with a different queue id, modify the filter;
6231          * if a filter exists with the same queue id, do nothing
6232          */
6233         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6234         if (!rule) {
6235                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6236                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6237                         spin_unlock_bh(&hdev->fd_rule_lock);
6238
6239                         return -ENOSPC;
6240                 }
6241
6242                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6243                 if (!rule) {
6244                         spin_unlock_bh(&hdev->fd_rule_lock);
6245
6246                         return -ENOMEM;
6247                 }
6248
6249                 set_bit(bit_id, hdev->fd_bmap);
6250                 rule->location = bit_id;
6251                 rule->flow_id = flow_id;
6252                 rule->queue_id = queue_id;
6253                 hclge_fd_build_arfs_rule(&new_tuples, rule);
6254                 ret = hclge_fd_config_rule(hdev, rule);
6255
6256                 spin_unlock_bh(&hdev->fd_rule_lock);
6257
6258                 if (ret)
6259                         return ret;
6260
6261                 return rule->location;
6262         }
6263
6264         spin_unlock_bh(&hdev->fd_rule_lock);
6265
6266         if (rule->queue_id == queue_id)
6267                 return rule->location;
6268
6269         tmp_queue_id = rule->queue_id;
6270         rule->queue_id = queue_id;
6271         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6272         if (ret) {
6273                 rule->queue_id = tmp_queue_id;
6274                 return ret;
6275         }
6276
6277         return rule->location;
6278 }
6279
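/* remove arfs rules whose flows rps_may_expire_flow() reports as expired,
 * then clear their TCAM entries outside the lock
 */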
6280 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6281 {
6282 #ifdef CONFIG_RFS_ACCEL
6283         struct hnae3_handle *handle = &hdev->vport[0].nic;
6284         struct hclge_fd_rule *rule;
6285         struct hlist_node *node;
6286         HLIST_HEAD(del_list);
6287
6288         spin_lock_bh(&hdev->fd_rule_lock);
6289         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6290                 spin_unlock_bh(&hdev->fd_rule_lock);
6291                 return;
6292         }
6293         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6294                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6295                                         rule->flow_id, rule->location)) {
6296                         hlist_del_init(&rule->rule_node);
6297                         hlist_add_head(&rule->rule_node, &del_list);
6298                         hdev->hclge_fd_rule_num--;
6299                         clear_bit(rule->location, hdev->fd_bmap);
6300                 }
6301         }
6302         spin_unlock_bh(&hdev->fd_rule_lock);
6303
6304         hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6305                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6306                                      rule->location, NULL, false);
6307                 kfree(rule);
6308         }
6309 #endif
6310 }
6311
6312 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6313 {
6314 #ifdef CONFIG_RFS_ACCEL
6315         struct hclge_vport *vport = hclge_get_vport(handle);
6316         struct hclge_dev *hdev = vport->back;
6317
6318         if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6319                 hclge_del_all_fd_entries(handle, true);
6320 #endif
6321 }
6322
6323 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6324 {
6325         struct hclge_vport *vport = hclge_get_vport(handle);
6326         struct hclge_dev *hdev = vport->back;
6327
6328         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6329                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6330 }
6331
6332 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6333 {
6334         struct hclge_vport *vport = hclge_get_vport(handle);
6335         struct hclge_dev *hdev = vport->back;
6336
6337         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6338 }
6339
6340 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6341 {
6342         struct hclge_vport *vport = hclge_get_vport(handle);
6343         struct hclge_dev *hdev = vport->back;
6344
6345         return hdev->rst_stats.hw_reset_done_cnt;
6346 }
6347
6348 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6349 {
6350         struct hclge_vport *vport = hclge_get_vport(handle);
6351         struct hclge_dev *hdev = vport->back;
6352         bool clear;
6353
6354         hdev->fd_en = enable;
6355         clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6356         if (!enable)
6357                 hclge_del_all_fd_entries(handle, clear);
6358         else
6359                 hclge_restore_fd_entries(handle);
6360 }
6361
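/* enable or disable MAC TX/RX together with the padding, FCS and oversize
 * truncation handling bits
 */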
6362 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6363 {
6364         struct hclge_desc desc;
6365         struct hclge_config_mac_mode_cmd *req =
6366                 (struct hclge_config_mac_mode_cmd *)desc.data;
6367         u32 loop_en = 0;
6368         int ret;
6369
6370         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6371
6372         if (enable) {
6373                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6374                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6375                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6376                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6377                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6378                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6379                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6380                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6381                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6382                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6383         }
6384
6385         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6386
6387         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6388         if (ret)
6389                 dev_err(&hdev->pdev->dev,
6390                         "mac enable fail, ret =%d.\n", ret);
6391 }
6392
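/* read-modify-write the MAC/VLAN switch parameter of the given function */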
6393 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6394                                      u8 switch_param, u8 param_mask)
6395 {
6396         struct hclge_mac_vlan_switch_cmd *req;
6397         struct hclge_desc desc;
6398         u32 func_id;
6399         int ret;
6400
6401         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6402         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6403
6404         /* read current config parameter */
6405         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6406                                    true);
6407         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6408         req->func_id = cpu_to_le32(func_id);
6409
6410         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6411         if (ret) {
6412                 dev_err(&hdev->pdev->dev,
6413                         "read mac vlan switch parameter fail, ret = %d\n", ret);
6414                 return ret;
6415         }
6416
6417         /* modify and write new config parameter */
6418         hclge_cmd_reuse_desc(&desc, false);
6419         req->switch_param = (req->switch_param & param_mask) | switch_param;
6420         req->param_mask = param_mask;
6421
6422         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6423         if (ret)
6424                 dev_err(&hdev->pdev->dev,
6425                         "set mac vlan switch parameter fail, ret = %d\n", ret);
6426         return ret;
6427 }
6428
6429 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6430                                        int link_ret)
6431 {
6432 #define HCLGE_PHY_LINK_STATUS_NUM  200
6433
6434         struct phy_device *phydev = hdev->hw.mac.phydev;
6435         int i = 0;
6436         int ret;
6437
6438         do {
6439                 ret = phy_read_status(phydev);
6440                 if (ret) {
6441                         dev_err(&hdev->pdev->dev,
6442                                 "phy update link status fail, ret = %d\n", ret);
6443                         return;
6444                 }
6445
6446                 if (phydev->link == link_ret)
6447                         break;
6448
6449                 msleep(HCLGE_LINK_STATUS_MS);
6450         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6451 }
6452
6453 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6454 {
6455 #define HCLGE_MAC_LINK_STATUS_NUM  100
6456
6457         int i = 0;
6458         int ret;
6459
6460         do {
6461                 ret = hclge_get_mac_link_status(hdev);
6462                 if (ret < 0)
6463                         return ret;
6464                 else if (ret == link_ret)
6465                         return 0;
6466
6467                 msleep(HCLGE_LINK_STATUS_MS);
6468         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6469         return -EBUSY;
6470 }
6471
6472 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6473                                           bool is_phy)
6474 {
6475 #define HCLGE_LINK_STATUS_DOWN 0
6476 #define HCLGE_LINK_STATUS_UP   1
6477
6478         int link_ret;
6479
6480         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6481
6482         if (is_phy)
6483                 hclge_phy_link_status_wait(hdev, link_ret);
6484
6485         return hclge_mac_link_status_wait(hdev, link_ret);
6486 }
6487
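/* app (MAC) loopback: read the current MAC mode, update the loopback and
 * TX/RX enable bits, then write the configuration back
 */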
6488 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6489 {
6490         struct hclge_config_mac_mode_cmd *req;
6491         struct hclge_desc desc;
6492         u32 loop_en;
6493         int ret;
6494
6495         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6496         /* 1 Read out the MAC mode config at first */
6497         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6498         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6499         if (ret) {
6500                 dev_err(&hdev->pdev->dev,
6501                         "mac loopback get fail, ret =%d.\n", ret);
6502                 return ret;
6503         }
6504
6505         /* 2 Then setup the loopback flag */
6506         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6507         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6508         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6509         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6510
6511         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6512
6513         /* 3 Config mac work mode with loopback flag
6514          * and its original configure parameters
6515          */
6516         hclge_cmd_reuse_desc(&desc, false);
6517         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6518         if (ret)
6519                 dev_err(&hdev->pdev->dev,
6520                         "mac loopback set fail, ret =%d.\n", ret);
6521         return ret;
6522 }
6523
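/* configure serdes serial/parallel loopback and poll until the firmware
 * reports the operation as done
 */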
6524 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6525                                      enum hnae3_loop loop_mode)
6526 {
6527 #define HCLGE_SERDES_RETRY_MS   10
6528 #define HCLGE_SERDES_RETRY_NUM  100
6529
6530         struct hclge_serdes_lb_cmd *req;
6531         struct hclge_desc desc;
6532         int ret, i = 0;
6533         u8 loop_mode_b;
6534
6535         req = (struct hclge_serdes_lb_cmd *)desc.data;
6536         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6537
6538         switch (loop_mode) {
6539         case HNAE3_LOOP_SERIAL_SERDES:
6540                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6541                 break;
6542         case HNAE3_LOOP_PARALLEL_SERDES:
6543                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6544                 break;
6545         default:
6546                 dev_err(&hdev->pdev->dev,
6547                         "unsupported serdes loopback mode %d\n", loop_mode);
6548                 return -ENOTSUPP;
6549         }
6550
6551         if (en) {
6552                 req->enable = loop_mode_b;
6553                 req->mask = loop_mode_b;
6554         } else {
6555                 req->mask = loop_mode_b;
6556         }
6557
6558         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6559         if (ret) {
6560                 dev_err(&hdev->pdev->dev,
6561                         "serdes loopback set fail, ret = %d\n", ret);
6562                 return ret;
6563         }
6564
6565         do {
6566                 msleep(HCLGE_SERDES_RETRY_MS);
6567                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6568                                            true);
6569                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6570                 if (ret) {
6571                         dev_err(&hdev->pdev->dev,
6572                                 "serdes loopback get fail, ret = %d\n", ret);
6573                         return ret;
6574                 }
6575         } while (++i < HCLGE_SERDES_RETRY_NUM &&
6576                  !(req->result & HCLGE_CMD_SERDES_DONE_B));
6577
6578         if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6579                 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6580                 return -EBUSY;
6581         } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6582                 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6583                 return -EIO;
6584         }
6585         return ret;
6586 }
6587
6588 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6589                                      enum hnae3_loop loop_mode)
6590 {
6591         int ret;
6592
6593         ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6594         if (ret)
6595                 return ret;
6596
6597         hclge_cfg_mac_mode(hdev, en);
6598
6599         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6600         if (ret)
6601                 dev_err(&hdev->pdev->dev,
6602                         "serdes loopback config mac mode timeout\n");
6603
6604         return ret;
6605 }
6606
6607 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6608                                      struct phy_device *phydev)
6609 {
6610         int ret;
6611
6612         if (!phydev->suspended) {
6613                 ret = phy_suspend(phydev);
6614                 if (ret)
6615                         return ret;
6616         }
6617
6618         ret = phy_resume(phydev);
6619         if (ret)
6620                 return ret;
6621
6622         return phy_loopback(phydev, true);
6623 }
6624
6625 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6626                                       struct phy_device *phydev)
6627 {
6628         int ret;
6629
6630         ret = phy_loopback(phydev, false);
6631         if (ret)
6632                 return ret;
6633
6634         return phy_suspend(phydev);
6635 }
6636
6637 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6638 {
6639         struct phy_device *phydev = hdev->hw.mac.phydev;
6640         int ret;
6641
6642         if (!phydev)
6643                 return -ENOTSUPP;
6644
6645         if (en)
6646                 ret = hclge_enable_phy_loopback(hdev, phydev);
6647         else
6648                 ret = hclge_disable_phy_loopback(hdev, phydev);
6649         if (ret) {
6650                 dev_err(&hdev->pdev->dev,
6651                         "set phy loopback fail, ret = %d\n", ret);
6652                 return ret;
6653         }
6654
6655         hclge_cfg_mac_mode(hdev, en);
6656
6657         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6658         if (ret)
6659                 dev_err(&hdev->pdev->dev,
6660                         "phy loopback config mac mode timeout\n");
6661
6662         return ret;
6663 }
6664
6665 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6666                             int stream_id, bool enable)
6667 {
6668         struct hclge_desc desc;
6669         struct hclge_cfg_com_tqp_queue_cmd *req =
6670                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6671         int ret;
6672
6673         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6674         req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6675         req->stream_id = cpu_to_le16(stream_id);
6676         if (enable)
6677                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6678
6679         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6680         if (ret)
6681                 dev_err(&hdev->pdev->dev,
6682                         "Tqp enable fail, status =%d.\n", ret);
6683         return ret;
6684 }
6685
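/* loopback test entry: adjust the SSU self-loopback switch parameter on
 * revision 0x21 and later, configure the requested loopback mode, then
 * enable or disable all TQPs of the vport
 */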
6686 static int hclge_set_loopback(struct hnae3_handle *handle,
6687                               enum hnae3_loop loop_mode, bool en)
6688 {
6689         struct hclge_vport *vport = hclge_get_vport(handle);
6690         struct hnae3_knic_private_info *kinfo;
6691         struct hclge_dev *hdev = vport->back;
6692         int i, ret;
6693
6694         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6695          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6696          * the same, the packets are looped back in the SSU. If SSU loopback
6697          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6698          */
6699         if (hdev->pdev->revision >= 0x21) {
6700                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6701
6702                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6703                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
6704                 if (ret)
6705                         return ret;
6706         }
6707
6708         switch (loop_mode) {
6709         case HNAE3_LOOP_APP:
6710                 ret = hclge_set_app_loopback(hdev, en);
6711                 break;
6712         case HNAE3_LOOP_SERIAL_SERDES:
6713         case HNAE3_LOOP_PARALLEL_SERDES:
6714                 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6715                 break;
6716         case HNAE3_LOOP_PHY:
6717                 ret = hclge_set_phy_loopback(hdev, en);
6718                 break;
6719         default:
6720                 ret = -ENOTSUPP;
6721                 dev_err(&hdev->pdev->dev,
6722                         "loop_mode %d is not supported\n", loop_mode);
6723                 break;
6724         }
6725
6726         if (ret)
6727                 return ret;
6728
6729         kinfo = &vport->nic.kinfo;
6730         for (i = 0; i < kinfo->num_tqps; i++) {
6731                 ret = hclge_tqp_enable(hdev, i, 0, en);
6732                 if (ret)
6733                         return ret;
6734         }
6735
6736         return 0;
6737 }
6738
6739 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6740 {
6741         int ret;
6742
6743         ret = hclge_set_app_loopback(hdev, false);
6744         if (ret)
6745                 return ret;
6746
6747         ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6748         if (ret)
6749                 return ret;
6750
6751         return hclge_cfg_serdes_loopback(hdev, false,
6752                                          HNAE3_LOOP_PARALLEL_SERDES);
6753 }
6754
6755 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6756 {
6757         struct hclge_vport *vport = hclge_get_vport(handle);
6758         struct hnae3_knic_private_info *kinfo;
6759         struct hnae3_queue *queue;
6760         struct hclge_tqp *tqp;
6761         int i;
6762
6763         kinfo = &vport->nic.kinfo;
6764         for (i = 0; i < kinfo->num_tqps; i++) {
6765                 queue = handle->kinfo.tqp[i];
6766                 tqp = container_of(queue, struct hclge_tqp, q);
6767                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
6768         }
6769 }
6770
6771 static void hclge_flush_link_update(struct hclge_dev *hdev)
6772 {
6773 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
6774
6775         unsigned long last = hdev->serv_processed_cnt;
6776         int i = 0;
6777
6778         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6779                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6780                last == hdev->serv_processed_cnt)
6781                 usleep_range(1, 1);
6782 }
6783
6784 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6785 {
6786         struct hclge_vport *vport = hclge_get_vport(handle);
6787         struct hclge_dev *hdev = vport->back;
6788
6789         if (enable) {
6790                 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6791         } else {
6792                 /* Set the DOWN flag here to disable link updating */
6793                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6794
6795                 /* flush memory to make sure DOWN is seen by service task */
6796                 smp_mb__before_atomic();
6797                 hclge_flush_link_update(hdev);
6798         }
6799 }
6800
6801 static int hclge_ae_start(struct hnae3_handle *handle)
6802 {
6803         struct hclge_vport *vport = hclge_get_vport(handle);
6804         struct hclge_dev *hdev = vport->back;
6805
6806         /* mac enable */
6807         hclge_cfg_mac_mode(hdev, true);
6808         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6809         hdev->hw.mac.link = 0;
6810
6811         /* reset tqp stats */
6812         hclge_reset_tqp_stats(handle);
6813
6814         hclge_mac_start_phy(hdev);
6815
6816         return 0;
6817 }
6818
6819 static void hclge_ae_stop(struct hnae3_handle *handle)
6820 {
6821         struct hclge_vport *vport = hclge_get_vport(handle);
6822         struct hclge_dev *hdev = vport->back;
6823         int i;
6824
6825         set_bit(HCLGE_STATE_DOWN, &hdev->state);
6826
6827         hclge_clear_arfs_rules(handle);
6828
6829         /* If it is not a PF reset, the firmware will disable the MAC,
6830          * so it only needs to stop the PHY here.
6831          */
6832         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6833             hdev->reset_type != HNAE3_FUNC_RESET) {
6834                 hclge_mac_stop_phy(hdev);
6835                 hclge_update_link_status(hdev);
6836                 return;
6837         }
6838
6839         for (i = 0; i < handle->kinfo.num_tqps; i++)
6840                 hclge_reset_tqp(handle, i);
6841
6842         hclge_config_mac_tnl_int(hdev, false);
6843
6844         /* Mac disable */
6845         hclge_cfg_mac_mode(hdev, false);
6846
6847         hclge_mac_stop_phy(hdev);
6848
6849         /* reset tqp stats */
6850         hclge_reset_tqp_stats(handle);
6851         hclge_update_link_status(hdev);
6852 }
6853
6854 int hclge_vport_start(struct hclge_vport *vport)
6855 {
6856         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6857         vport->last_active_jiffies = jiffies;
6858         return 0;
6859 }
6860
6861 void hclge_vport_stop(struct hclge_vport *vport)
6862 {
6863         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6864 }
6865
6866 static int hclge_client_start(struct hnae3_handle *handle)
6867 {
6868         struct hclge_vport *vport = hclge_get_vport(handle);
6869
6870         return hclge_vport_start(vport);
6871 }
6872
6873 static void hclge_client_stop(struct hnae3_handle *handle)
6874 {
6875         struct hclge_vport *vport = hclge_get_vport(handle);
6876
6877         hclge_vport_stop(vport);
6878 }
6879
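/* convert the command queue status and per-operation response code of a
 * MAC/VLAN table command into an errno
 */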
6880 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6881                                          u16 cmdq_resp, u8  resp_code,
6882                                          enum hclge_mac_vlan_tbl_opcode op)
6883 {
6884         struct hclge_dev *hdev = vport->back;
6885
6886         if (cmdq_resp) {
6887                 dev_err(&hdev->pdev->dev,
6888                         "cmdq execute failed for get_mac_vlan_cmd_status, status=%u.\n",
6889                         cmdq_resp);
6890                 return -EIO;
6891         }
6892
6893         if (op == HCLGE_MAC_VLAN_ADD) {
6894                 if ((!resp_code) || (resp_code == 1)) {
6895                         return 0;
6896                 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6897                         dev_err(&hdev->pdev->dev,
6898                                 "add mac addr failed for uc_overflow.\n");
6899                         return -ENOSPC;
6900                 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6901                         dev_err(&hdev->pdev->dev,
6902                                 "add mac addr failed for mc_overflow.\n");
6903                         return -ENOSPC;
6904                 }
6905
6906                 dev_err(&hdev->pdev->dev,
6907                         "add mac addr failed for undefined, code=%u.\n",
6908                         resp_code);
6909                 return -EIO;
6910         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6911                 if (!resp_code) {
6912                         return 0;
6913                 } else if (resp_code == 1) {
6914                         dev_dbg(&hdev->pdev->dev,
6915                                 "remove mac addr failed for miss.\n");
6916                         return -ENOENT;
6917                 }
6918
6919                 dev_err(&hdev->pdev->dev,
6920                         "remove mac addr failed for undefined, code=%u.\n",
6921                         resp_code);
6922                 return -EIO;
6923         } else if (op == HCLGE_MAC_VLAN_LKUP) {
6924                 if (!resp_code) {
6925                         return 0;
6926                 } else if (resp_code == 1) {
6927                         dev_dbg(&hdev->pdev->dev,
6928                                 "lookup mac addr failed for miss.\n");
6929                         return -ENOENT;
6930                 }
6931
6932                 dev_err(&hdev->pdev->dev,
6933                         "lookup mac addr failed for undefined, code=%u.\n",
6934                         resp_code);
6935                 return -EIO;
6936         }
6937
6938         dev_err(&hdev->pdev->dev,
6939                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6940
6941         return -EINVAL;
6942 }
6943
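/* set or clear the function-id bit of vfid in the multicast MAC table
 * descriptors (vfid 0-191 live in desc[1], the rest in desc[2])
 */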
6944 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6945 {
6946 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6947
6948         unsigned int word_num;
6949         unsigned int bit_num;
6950
6951         if (vfid > 255 || vfid < 0)
6952                 return -EIO;
6953
6954         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6955                 word_num = vfid / 32;
6956                 bit_num  = vfid % 32;
6957                 if (clr)
6958                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6959                 else
6960                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6961         } else {
6962                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6963                 bit_num  = vfid % 32;
6964                 if (clr)
6965                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6966                 else
6967                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
6968         }
6969
6970         return 0;
6971 }
6972
6973 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6974 {
6975 #define HCLGE_DESC_NUMBER 3
6976 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6977         int i, j;
6978
6979         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6980                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6981                         if (desc[i].data[j])
6982                                 return false;
6983
6984         return true;
6985 }
6986
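/* pack a MAC address into the MAC/VLAN table entry format and set the
 * entry flags (multicast entries additionally get the MC bits)
 */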
6987 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6988                                    const u8 *addr, bool is_mc)
6989 {
6990         const unsigned char *mac_addr = addr;
6991         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6992                        (mac_addr[0]) | (mac_addr[1] << 8);
6993         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
6994
6995         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6996         if (is_mc) {
6997                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6998                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6999         }
7000
7001         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7002         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7003 }
7004
7005 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7006                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
7007 {
7008         struct hclge_dev *hdev = vport->back;
7009         struct hclge_desc desc;
7010         u8 resp_code;
7011         u16 retval;
7012         int ret;
7013
7014         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7015
7016         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7017
7018         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7019         if (ret) {
7020                 dev_err(&hdev->pdev->dev,
7021                         "del mac addr failed for cmd_send, ret =%d.\n",
7022                         ret);
7023                 return ret;
7024         }
7025         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7026         retval = le16_to_cpu(desc.retval);
7027
7028         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7029                                              HCLGE_MAC_VLAN_REMOVE);
7030 }
7031
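/* hclge_lookup_mac_vlan_tbl - look up @req in the hardware MAC-VLAN table.
 * A unicast lookup fits in one descriptor; a multicast lookup chains three
 * descriptors (HCLGE_CMD_FLAG_NEXT on desc[0] and desc[1]) so the
 * per-function bitmap in desc[1]/desc[2] is returned as well. Returns 0 on
 * hit and -ENOENT on miss, via hclge_get_mac_vlan_cmd_status().
 */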
7032 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7033                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
7034                                      struct hclge_desc *desc,
7035                                      bool is_mc)
7036 {
7037         struct hclge_dev *hdev = vport->back;
7038         u8 resp_code;
7039         u16 retval;
7040         int ret;
7041
7042         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7043         if (is_mc) {
7044                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7045                 memcpy(desc[0].data,
7046                        req,
7047                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7048                 hclge_cmd_setup_basic_desc(&desc[1],
7049                                            HCLGE_OPC_MAC_VLAN_ADD,
7050                                            true);
7051                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7052                 hclge_cmd_setup_basic_desc(&desc[2],
7053                                            HCLGE_OPC_MAC_VLAN_ADD,
7054                                            true);
7055                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7056         } else {
7057                 memcpy(desc[0].data,
7058                        req,
7059                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7060                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7061         }
7062         if (ret) {
7063                 dev_err(&hdev->pdev->dev,
7064                         "lookup mac addr failed for cmd_send, ret =%d.\n",
7065                         ret);
7066                 return ret;
7067         }
7068         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7069         retval = le16_to_cpu(desc[0].retval);
7070
7071         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7072                                              HCLGE_MAC_VLAN_LKUP);
7073 }
7074
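/* hclge_add_mac_vlan_tbl - write @req into the hardware MAC-VLAN table.
 * Unicast entries (mc_desc == NULL) use a single descriptor; multicast
 * entries reuse the three descriptors filled by the preceding lookup, so
 * the per-function bitmap in desc[1]/desc[2] is written back together with
 * the entry itself.
 */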
7075 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7076                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
7077                                   struct hclge_desc *mc_desc)
7078 {
7079         struct hclge_dev *hdev = vport->back;
7080         int cfg_status;
7081         u8 resp_code;
7082         u16 retval;
7083         int ret;
7084
7085         if (!mc_desc) {
7086                 struct hclge_desc desc;
7087
7088                 hclge_cmd_setup_basic_desc(&desc,
7089                                            HCLGE_OPC_MAC_VLAN_ADD,
7090                                            false);
7091                 memcpy(desc.data, req,
7092                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7093                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7094                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7095                 retval = le16_to_cpu(desc.retval);
7096
7097                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7098                                                            resp_code,
7099                                                            HCLGE_MAC_VLAN_ADD);
7100         } else {
7101                 hclge_cmd_reuse_desc(&mc_desc[0], false);
7102                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7103                 hclge_cmd_reuse_desc(&mc_desc[1], false);
7104                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7105                 hclge_cmd_reuse_desc(&mc_desc[2], false);
7106                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7107                 memcpy(mc_desc[0].data, req,
7108                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7109                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7110                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7111                 retval = le16_to_cpu(mc_desc[0].retval);
7112
7113                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7114                                                            resp_code,
7115                                                            HCLGE_MAC_VLAN_ADD);
7116         }
7117
7118         if (ret) {
7119                 dev_err(&hdev->pdev->dev,
7120                         "add mac addr failed for cmd_send, ret =%d.\n",
7121                         ret);
7122                 return ret;
7123         }
7124
7125         return cfg_status;
7126 }
7127
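/* hclge_init_umv_space - request unicast MAC-VLAN (UMV) table space from
 * firmware and split it between the PF and its VFs: each of the
 * (num_req_vfs + 2) parties gets a private quota of
 * max_umv_size / (num_req_vfs + 2) entries, and the division remainder is
 * added on top of one private quota to form the shared pool. As a purely
 * illustrative example, max_umv_size = 256 with num_req_vfs = 6 gives a
 * private quota of 256 / 8 = 32 and a shared pool of 32 + 0 = 32.
 */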
7128 static int hclge_init_umv_space(struct hclge_dev *hdev)
7129 {
7130         u16 allocated_size = 0;
7131         int ret;
7132
7133         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7134                                   true);
7135         if (ret)
7136                 return ret;
7137
7138         if (allocated_size < hdev->wanted_umv_size)
7139                 dev_warn(&hdev->pdev->dev,
7140                          "Alloc umv space failed, want %u, get %u\n",
7141                          hdev->wanted_umv_size, allocated_size);
7142
7143         mutex_init(&hdev->umv_mutex);
7144         hdev->max_umv_size = allocated_size;
7145         /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7146          * reserve some unicast mac vlan table entries to be shared by
7147          * the pf and its vfs.
7148          */
7149         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7150         hdev->share_umv_size = hdev->priv_umv_size +
7151                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7152
7153         return 0;
7154 }
7155
7156 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7157 {
7158         int ret;
7159
7160         if (hdev->max_umv_size > 0) {
7161                 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7162                                           false);
7163                 if (ret)
7164                         return ret;
7165                 hdev->max_umv_size = 0;
7166         }
7167         mutex_destroy(&hdev->umv_mutex);
7168
7169         return 0;
7170 }
7171
7172 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7173                                u16 *allocated_size, bool is_alloc)
7174 {
7175         struct hclge_umv_spc_alc_cmd *req;
7176         struct hclge_desc desc;
7177         int ret;
7178
7179         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7180         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7181         if (!is_alloc)
7182                 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7183
7184         req->space_size = cpu_to_le32(space_size);
7185
7186         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7187         if (ret) {
7188                 dev_err(&hdev->pdev->dev,
7189                         "%s umv space failed for cmd_send, ret =%d\n",
7190                         is_alloc ? "allocate" : "free", ret);
7191                 return ret;
7192         }
7193
7194         if (is_alloc && allocated_size)
7195                 *allocated_size = le32_to_cpu(desc.data[1]);
7196
7197         return 0;
7198 }
7199
7200 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7201 {
7202         struct hclge_vport *vport;
7203         int i;
7204
7205         for (i = 0; i < hdev->num_alloc_vport; i++) {
7206                 vport = &hdev->vport[i];
7207                 vport->used_umv_num = 0;
7208         }
7209
7210         mutex_lock(&hdev->umv_mutex);
7211         hdev->share_umv_size = hdev->priv_umv_size +
7212                         hdev->max_umv_size % (hdev->num_req_vfs + 2);
7213         mutex_unlock(&hdev->umv_mutex);
7214 }
7215
7216 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7217 {
7218         struct hclge_dev *hdev = vport->back;
7219         bool is_full;
7220
7221         mutex_lock(&hdev->umv_mutex);
7222         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7223                    hdev->share_umv_size == 0);
7224         mutex_unlock(&hdev->umv_mutex);
7225
7226         return is_full;
7227 }
7228
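/* hclge_update_umv_space - account one unicast entry for @vport: the
 * private quota is consumed first and the shared pool only once the
 * private quota is exhausted; @is_free reverses the accounting when an
 * entry is removed. Serialized by umv_mutex.
 */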
7229 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7230 {
7231         struct hclge_dev *hdev = vport->back;
7232
7233         mutex_lock(&hdev->umv_mutex);
7234         if (is_free) {
7235                 if (vport->used_umv_num > hdev->priv_umv_size)
7236                         hdev->share_umv_size++;
7237
7238                 if (vport->used_umv_num > 0)
7239                         vport->used_umv_num--;
7240         } else {
7241                 if (vport->used_umv_num >= hdev->priv_umv_size &&
7242                     hdev->share_umv_size > 0)
7243                         hdev->share_umv_size--;
7244                 vport->used_umv_num++;
7245         }
7246         mutex_unlock(&hdev->umv_mutex);
7247 }
7248
7249 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7250                              const unsigned char *addr)
7251 {
7252         struct hclge_vport *vport = hclge_get_vport(handle);
7253
7254         return hclge_add_uc_addr_common(vport, addr);
7255 }
7256
7257 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7258                              const unsigned char *addr)
7259 {
7260         struct hclge_dev *hdev = vport->back;
7261         struct hclge_mac_vlan_tbl_entry_cmd req;
7262         struct hclge_desc desc;
7263         u16 egress_port = 0;
7264         int ret;
7265
7266         /* mac addr check */
7267         if (is_zero_ether_addr(addr) ||
7268             is_broadcast_ether_addr(addr) ||
7269             is_multicast_ether_addr(addr)) {
7270                 dev_err(&hdev->pdev->dev,
7271                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7272                          addr, is_zero_ether_addr(addr),
7273                          is_broadcast_ether_addr(addr),
7274                          is_multicast_ether_addr(addr));
7275                 return -EINVAL;
7276         }
7277
7278         memset(&req, 0, sizeof(req));
7279
7280         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7281                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7282
7283         req.egress_port = cpu_to_le16(egress_port);
7284
7285         hclge_prepare_mac_addr(&req, addr, false);
7286
7287         /* Look up the mac address in the mac_vlan table, and add
7288          * it if the entry does not exist. Duplicate unicast entries
7289          * are not allowed in the mac vlan table.
7290          */
7291         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7292         if (ret == -ENOENT) {
7293                 if (!hclge_is_umv_space_full(vport)) {
7294                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7295                         if (!ret)
7296                                 hclge_update_umv_space(vport, false);
7297                         return ret;
7298                 }
7299
7300                 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7301                         hdev->priv_umv_size);
7302
7303                 return -ENOSPC;
7304         }
7305
7306         /* check if we just hit the duplicate */
7307         if (!ret) {
7308                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7309                          vport->vport_id, addr);
7310                 return 0;
7311         }
7312
7313         dev_err(&hdev->pdev->dev,
7314                 "PF failed to add unicast entry(%pM) in the MAC table\n",
7315                 addr);
7316
7317         return ret;
7318 }
7319
7320 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7321                             const unsigned char *addr)
7322 {
7323         struct hclge_vport *vport = hclge_get_vport(handle);
7324
7325         return hclge_rm_uc_addr_common(vport, addr);
7326 }
7327
7328 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7329                             const unsigned char *addr)
7330 {
7331         struct hclge_dev *hdev = vport->back;
7332         struct hclge_mac_vlan_tbl_entry_cmd req;
7333         int ret;
7334
7335         /* mac addr check */
7336         if (is_zero_ether_addr(addr) ||
7337             is_broadcast_ether_addr(addr) ||
7338             is_multicast_ether_addr(addr)) {
7339                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7340                         addr);
7341                 return -EINVAL;
7342         }
7343
7344         memset(&req, 0, sizeof(req));
7345         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7346         hclge_prepare_mac_addr(&req, addr, false);
7347         ret = hclge_remove_mac_vlan_tbl(vport, &req);
7348         if (!ret)
7349                 hclge_update_umv_space(vport, true);
7350
7351         return ret;
7352 }
7353
7354 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7355                              const unsigned char *addr)
7356 {
7357         struct hclge_vport *vport = hclge_get_vport(handle);
7358
7359         return hclge_add_mc_addr_common(vport, addr);
7360 }
7361
7362 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7363                              const unsigned char *addr)
7364 {
7365         struct hclge_dev *hdev = vport->back;
7366         struct hclge_mac_vlan_tbl_entry_cmd req;
7367         struct hclge_desc desc[3];
7368         int status;
7369
7370         /* mac addr check */
7371         if (!is_multicast_ether_addr(addr)) {
7372                 dev_err(&hdev->pdev->dev,
7373                         "Add mc mac err! invalid mac:%pM.\n",
7374                          addr);
7375                 return -EINVAL;
7376         }
7377         memset(&req, 0, sizeof(req));
7378         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7379         hclge_prepare_mac_addr(&req, addr, true);
7380         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7381         if (status) {
7382                 /* This mac addr does not exist, add a new entry for it */
7383                 memset(desc[0].data, 0, sizeof(desc[0].data));
7384                 memset(desc[1].data, 0, sizeof(desc[1].data));
7385                 memset(desc[2].data, 0, sizeof(desc[2].data));
7386         }
7387         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7388         if (status)
7389                 return status;
7390         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7391
7392         if (status == -ENOSPC)
7393                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7394
7395         return status;
7396 }
7397
7398 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7399                             const unsigned char *addr)
7400 {
7401         struct hclge_vport *vport = hclge_get_vport(handle);
7402
7403         return hclge_rm_mc_addr_common(vport, addr);
7404 }
7405
7406 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7407                             const unsigned char *addr)
7408 {
7409         struct hclge_dev *hdev = vport->back;
7410         struct hclge_mac_vlan_tbl_entry_cmd req;
7411         int status;
7412         struct hclge_desc desc[3];
7413
7414         /* mac addr check */
7415         if (!is_multicast_ether_addr(addr)) {
7416                 dev_dbg(&hdev->pdev->dev,
7417                         "Remove mc mac err! invalid mac:%pM.\n",
7418                          addr);
7419                 return -EINVAL;
7420         }
7421
7422         memset(&req, 0, sizeof(req));
7423         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7424         hclge_prepare_mac_addr(&req, addr, true);
7425         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7426         if (!status) {
7427                 /* This mac addr exists, remove this handle's VFID from it */
7428                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7429                 if (status)
7430                         return status;
7431
7432                 if (hclge_is_all_function_id_zero(desc))
7433                         /* All the vfids are zero, so delete this entry */
7434                         status = hclge_remove_mac_vlan_tbl(vport, &req);
7435                 else
7436                         /* Not all the vfids are zero, just update the vfid bitmap */
7437                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7438
7439         } else {
7440                 /* This mac address may be in the mta table, but it cannot be
7441                  * deleted here because an mta entry represents an address
7442                  * range rather than a specific address. The deletion of all
7443                  * entries takes effect in update_mta_status, called by
7444                  * hns3_nic_set_rx_mode.
7445                  */
7446                 status = 0;
7447         }
7448
7449         return status;
7450 }
7451
7452 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7453                                enum HCLGE_MAC_ADDR_TYPE mac_type)
7454 {
7455         struct hclge_vport_mac_addr_cfg *mac_cfg;
7456         struct list_head *list;
7457
7458         if (!vport->vport_id)
7459                 return;
7460
7461         mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7462         if (!mac_cfg)
7463                 return;
7464
7465         mac_cfg->hd_tbl_status = true;
7466         memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7467
7468         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7469                &vport->uc_mac_list : &vport->mc_mac_list;
7470
7471         list_add_tail(&mac_cfg->node, list);
7472 }
7473
7474 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7475                               bool is_write_tbl,
7476                               enum HCLGE_MAC_ADDR_TYPE mac_type)
7477 {
7478         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7479         struct list_head *list;
7480         bool uc_flag, mc_flag;
7481
7482         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7483                &vport->uc_mac_list : &vport->mc_mac_list;
7484
7485         uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7486         mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7487
7488         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7489                 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7490                         if (uc_flag && mac_cfg->hd_tbl_status)
7491                                 hclge_rm_uc_addr_common(vport, mac_addr);
7492
7493                         if (mc_flag && mac_cfg->hd_tbl_status)
7494                                 hclge_rm_mc_addr_common(vport, mac_addr);
7495
7496                         list_del(&mac_cfg->node);
7497                         kfree(mac_cfg);
7498                         break;
7499                 }
7500         }
7501 }
7502
7503 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7504                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
7505 {
7506         struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7507         struct list_head *list;
7508
7509         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7510                &vport->uc_mac_list : &vport->mc_mac_list;
7511
7512         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7513                 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7514                         hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7515
7516                 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7517                         hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7518
7519                 mac_cfg->hd_tbl_status = false;
7520                 if (is_del_list) {
7521                         list_del(&mac_cfg->node);
7522                         kfree(mac_cfg);
7523                 }
7524         }
7525 }
7526
7527 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7528 {
7529         struct hclge_vport_mac_addr_cfg *mac, *tmp;
7530         struct hclge_vport *vport;
7531         int i;
7532
7533         mutex_lock(&hdev->vport_cfg_mutex);
7534         for (i = 0; i < hdev->num_alloc_vport; i++) {
7535                 vport = &hdev->vport[i];
7536                 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7537                         list_del(&mac->node);
7538                         kfree(mac);
7539                 }
7540
7541                 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7542                         list_del(&mac->node);
7543                         kfree(mac);
7544                 }
7545         }
7546         mutex_unlock(&hdev->vport_cfg_mutex);
7547 }
7548
7549 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7550                                               u16 cmdq_resp, u8 resp_code)
7551 {
7552 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
7553 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
7554 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
7555 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
7556
7557         int return_status;
7558
7559         if (cmdq_resp) {
7560                 dev_err(&hdev->pdev->dev,
7561                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7562                         cmdq_resp);
7563                 return -EIO;
7564         }
7565
7566         switch (resp_code) {
7567         case HCLGE_ETHERTYPE_SUCCESS_ADD:
7568         case HCLGE_ETHERTYPE_ALREADY_ADD:
7569                 return_status = 0;
7570                 break;
7571         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7572                 dev_err(&hdev->pdev->dev,
7573                         "add mac ethertype failed for manager table overflow.\n");
7574                 return_status = -EIO;
7575                 break;
7576         case HCLGE_ETHERTYPE_KEY_CONFLICT:
7577                 dev_err(&hdev->pdev->dev,
7578                         "add mac ethertype failed for key conflict.\n");
7579                 return_status = -EIO;
7580                 break;
7581         default:
7582                 dev_err(&hdev->pdev->dev,
7583                         "add mac ethertype failed for undefined, code=%u.\n",
7584                         resp_code);
7585                 return_status = -EIO;
7586         }
7587
7588         return return_status;
7589 }
7590
7591 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7592                                      u8 *mac_addr)
7593 {
7594         struct hclge_mac_vlan_tbl_entry_cmd req;
7595         struct hclge_dev *hdev = vport->back;
7596         struct hclge_desc desc;
7597         u16 egress_port = 0;
7598         int i;
7599
7600         if (is_zero_ether_addr(mac_addr))
7601                 return false;
7602
7603         memset(&req, 0, sizeof(req));
7604         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7605                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7606         req.egress_port = cpu_to_le16(egress_port);
7607         hclge_prepare_mac_addr(&req, mac_addr, false);
7608
7609         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7610                 return true;
7611
7612         vf_idx += HCLGE_VF_VPORT_START_NUM;
7613         for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7614                 if (i != vf_idx &&
7615                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7616                         return true;
7617
7618         return false;
7619 }
7620
7621 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7622                             u8 *mac_addr)
7623 {
7624         struct hclge_vport *vport = hclge_get_vport(handle);
7625         struct hclge_dev *hdev = vport->back;
7626
7627         vport = hclge_get_vf_vport(hdev, vf);
7628         if (!vport)
7629                 return -EINVAL;
7630
7631         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7632                 dev_info(&hdev->pdev->dev,
7633                          "Specified MAC(=%pM) is same as before, no change committed!\n",
7634                          mac_addr);
7635                 return 0;
7636         }
7637
7638         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7639                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7640                         mac_addr);
7641                 return -EEXIST;
7642         }
7643
7644         ether_addr_copy(vport->vf_info.mac, mac_addr);
7645         dev_info(&hdev->pdev->dev,
7646                  "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7647                  vf, mac_addr);
7648
7649         return hclge_inform_reset_assert_to_vf(vport);
7650 }
7651
7652 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7653                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
7654 {
7655         struct hclge_desc desc;
7656         u8 resp_code;
7657         u16 retval;
7658         int ret;
7659
7660         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7661         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7662
7663         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7664         if (ret) {
7665                 dev_err(&hdev->pdev->dev,
7666                         "add mac ethertype failed for cmd_send, ret =%d.\n",
7667                         ret);
7668                 return ret;
7669         }
7670
7671         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7672         retval = le16_to_cpu(desc.retval);
7673
7674         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7675 }
7676
7677 static int init_mgr_tbl(struct hclge_dev *hdev)
7678 {
7679         int ret;
7680         int i;
7681
7682         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7683                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7684                 if (ret) {
7685                         dev_err(&hdev->pdev->dev,
7686                                 "add mac ethertype failed, ret =%d.\n",
7687                                 ret);
7688                         return ret;
7689                 }
7690         }
7691
7692         return 0;
7693 }
7694
7695 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7696 {
7697         struct hclge_vport *vport = hclge_get_vport(handle);
7698         struct hclge_dev *hdev = vport->back;
7699
7700         ether_addr_copy(p, hdev->hw.mac.mac_addr);
7701 }
7702
7703 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7704                               bool is_first)
7705 {
7706         const unsigned char *new_addr = (const unsigned char *)p;
7707         struct hclge_vport *vport = hclge_get_vport(handle);
7708         struct hclge_dev *hdev = vport->back;
7709         int ret;
7710
7711         /* mac addr check */
7712         if (is_zero_ether_addr(new_addr) ||
7713             is_broadcast_ether_addr(new_addr) ||
7714             is_multicast_ether_addr(new_addr)) {
7715                 dev_err(&hdev->pdev->dev,
7716                         "Change uc mac err! invalid mac:%pM.\n",
7717                          new_addr);
7718                 return -EINVAL;
7719         }
7720
7721         if ((!is_first || is_kdump_kernel()) &&
7722             hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7723                 dev_warn(&hdev->pdev->dev,
7724                          "remove old uc mac address fail.\n");
7725
7726         ret = hclge_add_uc_addr(handle, new_addr);
7727         if (ret) {
7728                 dev_err(&hdev->pdev->dev,
7729                         "add uc mac address fail, ret =%d.\n",
7730                         ret);
7731
7732                 if (!is_first &&
7733                     hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7734                         dev_err(&hdev->pdev->dev,
7735                                 "restore uc mac address fail.\n");
7736
7737                 return -EIO;
7738         }
7739
7740         ret = hclge_pause_addr_cfg(hdev, new_addr);
7741         if (ret) {
7742                 dev_err(&hdev->pdev->dev,
7743                         "configure mac pause address fail, ret =%d.\n",
7744                         ret);
7745                 return -EIO;
7746         }
7747
7748         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7749
7750         return 0;
7751 }
7752
7753 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7754                           int cmd)
7755 {
7756         struct hclge_vport *vport = hclge_get_vport(handle);
7757         struct hclge_dev *hdev = vport->back;
7758
7759         if (!hdev->hw.mac.phydev)
7760                 return -EOPNOTSUPP;
7761
7762         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7763 }
7764
7765 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7766                                       u8 fe_type, bool filter_en, u8 vf_id)
7767 {
7768         struct hclge_vlan_filter_ctrl_cmd *req;
7769         struct hclge_desc desc;
7770         int ret;
7771
7772         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7773
7774         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7775         req->vlan_type = vlan_type;
7776         req->vlan_fe = filter_en ? fe_type : 0;
7777         req->vf_id = vf_id;
7778
7779         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7780         if (ret)
7781                 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7782                         ret);
7783
7784         return ret;
7785 }
7786
7787 #define HCLGE_FILTER_TYPE_VF            0
7788 #define HCLGE_FILTER_TYPE_PORT          1
7789 #define HCLGE_FILTER_FE_EGRESS_V1_B     BIT(0)
7790 #define HCLGE_FILTER_FE_NIC_INGRESS_B   BIT(0)
7791 #define HCLGE_FILTER_FE_NIC_EGRESS_B    BIT(1)
7792 #define HCLGE_FILTER_FE_ROCE_INGRESS_B  BIT(2)
7793 #define HCLGE_FILTER_FE_ROCE_EGRESS_B   BIT(3)
7794 #define HCLGE_FILTER_FE_EGRESS          (HCLGE_FILTER_FE_NIC_EGRESS_B \
7795                                         | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7796 #define HCLGE_FILTER_FE_INGRESS         (HCLGE_FILTER_FE_NIC_INGRESS_B \
7797                                         | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7798
7799 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7800 {
7801         struct hclge_vport *vport = hclge_get_vport(handle);
7802         struct hclge_dev *hdev = vport->back;
7803
7804         if (hdev->pdev->revision >= 0x21) {
7805                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7806                                            HCLGE_FILTER_FE_EGRESS, enable, 0);
7807                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7808                                            HCLGE_FILTER_FE_INGRESS, enable, 0);
7809         } else {
7810                 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7811                                            HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7812                                            0);
7813         }
7814         if (enable)
7815                 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7816         else
7817                 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7818 }
7819
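/* hclge_set_vf_vlan_common - add or kill @vlan in the VLAN filter of
 * function @vfid. The command carries a function bitmap split across two
 * descriptors (the first holds HCLGE_MAX_VF_BYTES bytes) and only the bit
 * for @vfid is set here. When adding, a HCLGE_VF_VLAN_NO_ENTRY response
 * means the vf vlan table is full; the vfid is then recorded in
 * hdev->vf_vlan_full so later additions are skipped.
 */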
7820 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7821                                     bool is_kill, u16 vlan,
7822                                     __be16 proto)
7823 {
7824         struct hclge_vport *vport = &hdev->vport[vfid];
7825         struct hclge_vlan_filter_vf_cfg_cmd *req0;
7826         struct hclge_vlan_filter_vf_cfg_cmd *req1;
7827         struct hclge_desc desc[2];
7828         u8 vf_byte_val;
7829         u8 vf_byte_off;
7830         int ret;
7831
7832         /* If the vf vlan table is full, firmware disables the vf vlan filter,
7833          * so it is neither possible nor necessary to add a new vlan id to it.
7834          * If spoof check is enabled and the vf vlan table is full, new vlans
7835          * must not be added, because tx packets carrying them would be dropped.
7836          */
7837         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7838                 if (vport->vf_info.spoofchk && vlan) {
7839                         dev_err(&hdev->pdev->dev,
7840                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7841                         return -EPERM;
7842                 }
7843                 return 0;
7844         }
7845
7846         hclge_cmd_setup_basic_desc(&desc[0],
7847                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7848         hclge_cmd_setup_basic_desc(&desc[1],
7849                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7850
7851         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7852
7853         vf_byte_off = vfid / 8;
7854         vf_byte_val = 1 << (vfid % 8);
7855
7856         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7857         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7858
7859         req0->vlan_id  = cpu_to_le16(vlan);
7860         req0->vlan_cfg = is_kill;
7861
7862         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7863                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7864         else
7865                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7866
7867         ret = hclge_cmd_send(&hdev->hw, desc, 2);
7868         if (ret) {
7869                 dev_err(&hdev->pdev->dev,
7870                         "Send vf vlan command fail, ret =%d.\n",
7871                         ret);
7872                 return ret;
7873         }
7874
7875         if (!is_kill) {
7876 #define HCLGE_VF_VLAN_NO_ENTRY  2
7877                 if (!req0->resp_code || req0->resp_code == 1)
7878                         return 0;
7879
7880                 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7881                         set_bit(vfid, hdev->vf_vlan_full);
7882                         dev_warn(&hdev->pdev->dev,
7883                                  "vf vlan table is full, vf vlan filter is disabled\n");
7884                         return 0;
7885                 }
7886
7887                 dev_err(&hdev->pdev->dev,
7888                         "Add vf vlan filter fail, ret =%u.\n",
7889                         req0->resp_code);
7890         } else {
7891 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
7892                 if (!req0->resp_code)
7893                         return 0;
7894
7895                 /* The vf vlan filter is disabled once the vf vlan table is
7896                  * full, so new vlan ids are never added to it. Just return 0
7897                  * without a warning, to avoid flooding the log when the
7898                  * driver is unloaded.
7899                  */
7900                 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7901                         return 0;
7902
7903                 dev_err(&hdev->pdev->dev,
7904                         "Kill vf vlan filter fail, ret =%u.\n",
7905                         req0->resp_code);
7906         }
7907
7908         return -EIO;
7909 }
7910
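/* hclge_set_port_vlan_filter - add or kill @vlan_id in the port VLAN
 * filter. The VLAN id space is addressed in groups of
 * HCLGE_VLAN_ID_OFFSET_STEP ids (160 per group, judging by the
 * vlan_offset_160 naming); within a group the id selects a byte of
 * HCLGE_VLAN_BYTE_SIZE bits and a bit inside the offset bitmap.
 */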
7911 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7912                                       u16 vlan_id, bool is_kill)
7913 {
7914         struct hclge_vlan_filter_pf_cfg_cmd *req;
7915         struct hclge_desc desc;
7916         u8 vlan_offset_byte_val;
7917         u8 vlan_offset_byte;
7918         u8 vlan_offset_160;
7919         int ret;
7920
7921         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7922
7923         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7924         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7925                            HCLGE_VLAN_BYTE_SIZE;
7926         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7927
7928         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7929         req->vlan_offset = vlan_offset_160;
7930         req->vlan_cfg = is_kill;
7931         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7932
7933         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7934         if (ret)
7935                 dev_err(&hdev->pdev->dev,
7936                         "port vlan command, send fail, ret =%d.\n", ret);
7937         return ret;
7938 }
7939
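/* hclge_set_vlan_filter_hw - program both VLAN filters for @vport_id.
 * The per-VF filter is always updated, while the port filter is only
 * touched when the first vport adds @vlan_id or the last vport removes
 * it; hdev->vlan_table tracks which vports currently use each vlan.
 * Killing vlan 0 is silently ignored.
 */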
7940 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7941                                     u16 vport_id, u16 vlan_id,
7942                                     bool is_kill)
7943 {
7944         u16 vport_idx, vport_num = 0;
7945         int ret;
7946
7947         if (is_kill && !vlan_id)
7948                 return 0;
7949
7950         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7951                                        proto);
7952         if (ret) {
7953                 dev_err(&hdev->pdev->dev,
7954                         "Set %u vport vlan filter config fail, ret =%d.\n",
7955                         vport_id, ret);
7956                 return ret;
7957         }
7958
7959         /* vlan 0 may be added twice when 8021q module is enabled */
7960         if (!is_kill && !vlan_id &&
7961             test_bit(vport_id, hdev->vlan_table[vlan_id]))
7962                 return 0;
7963
7964         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7965                 dev_err(&hdev->pdev->dev,
7966                         "Add port vlan failed, vport %u is already in vlan %u\n",
7967                         vport_id, vlan_id);
7968                 return -EINVAL;
7969         }
7970
7971         if (is_kill &&
7972             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7973                 dev_err(&hdev->pdev->dev,
7974                         "Delete port vlan failed, vport %u is not in vlan %u\n",
7975                         vport_id, vlan_id);
7976                 return -EINVAL;
7977         }
7978
7979         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7980                 vport_num++;
7981
7982         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7983                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
7984                                                  is_kill);
7985
7986         return ret;
7987 }
7988
7989 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7990 {
7991         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7992         struct hclge_vport_vtag_tx_cfg_cmd *req;
7993         struct hclge_dev *hdev = vport->back;
7994         struct hclge_desc desc;
7995         u16 bmap_index;
7996         int status;
7997
7998         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
7999
8000         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8001         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8002         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8003         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8004                       vcfg->accept_tag1 ? 1 : 0);
8005         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8006                       vcfg->accept_untag1 ? 1 : 0);
8007         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8008                       vcfg->accept_tag2 ? 1 : 0);
8009         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8010                       vcfg->accept_untag2 ? 1 : 0);
8011         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8012                       vcfg->insert_tag1_en ? 1 : 0);
8013         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8014                       vcfg->insert_tag2_en ? 1 : 0);
8015         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8016
8017         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8018         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8019                         HCLGE_VF_NUM_PER_BYTE;
8020         req->vf_bitmap[bmap_index] =
8021                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8022
8023         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8024         if (status)
8025                 dev_err(&hdev->pdev->dev,
8026                         "Send port txvlan cfg command fail, ret =%d\n",
8027                         status);
8028
8029         return status;
8030 }
8031
8032 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8033 {
8034         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8035         struct hclge_vport_vtag_rx_cfg_cmd *req;
8036         struct hclge_dev *hdev = vport->back;
8037         struct hclge_desc desc;
8038         u16 bmap_index;
8039         int status;
8040
8041         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8042
8043         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8044         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8045                       vcfg->strip_tag1_en ? 1 : 0);
8046         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8047                       vcfg->strip_tag2_en ? 1 : 0);
8048         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8049                       vcfg->vlan1_vlan_prionly ? 1 : 0);
8050         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8051                       vcfg->vlan2_vlan_prionly ? 1 : 0);
8052
8053         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8054         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8055                         HCLGE_VF_NUM_PER_BYTE;
8056         req->vf_bitmap[bmap_index] =
8057                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8058
8059         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8060         if (status)
8061                 dev_err(&hdev->pdev->dev,
8062                         "Send port rxvlan cfg command fail, ret =%d\n",
8063                         status);
8064
8065         return status;
8066 }
8067
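/* hclge_vlan_offload_cfg - configure TX insertion and RX stripping for a
 * vport according to its port-based VLAN state. When port-based VLAN is
 * disabled, tag1 from the stack is accepted untouched and only tag2
 * stripping follows rx_vlan_offload_en; when enabled, tag1 from the stack
 * is not accepted, @vlan_tag is inserted as tag1, tag2 stripping is forced
 * on and tag1 stripping follows rx_vlan_offload_en.
 */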
8068 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8069                                   u16 port_base_vlan_state,
8070                                   u16 vlan_tag)
8071 {
8072         int ret;
8073
8074         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8075                 vport->txvlan_cfg.accept_tag1 = true;
8076                 vport->txvlan_cfg.insert_tag1_en = false;
8077                 vport->txvlan_cfg.default_tag1 = 0;
8078         } else {
8079                 vport->txvlan_cfg.accept_tag1 = false;
8080                 vport->txvlan_cfg.insert_tag1_en = true;
8081                 vport->txvlan_cfg.default_tag1 = vlan_tag;
8082         }
8083
8084         vport->txvlan_cfg.accept_untag1 = true;
8085
8086         /* accept_tag2 and accept_untag2 are not supported on
8087          * pdev revision 0x20; newer revisions support them, but
8088          * these two fields cannot be configured by the user.
8089          */
8090         vport->txvlan_cfg.accept_tag2 = true;
8091         vport->txvlan_cfg.accept_untag2 = true;
8092         vport->txvlan_cfg.insert_tag2_en = false;
8093         vport->txvlan_cfg.default_tag2 = 0;
8094
8095         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8096                 vport->rxvlan_cfg.strip_tag1_en = false;
8097                 vport->rxvlan_cfg.strip_tag2_en =
8098                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8099         } else {
8100                 vport->rxvlan_cfg.strip_tag1_en =
8101                                 vport->rxvlan_cfg.rx_vlan_offload_en;
8102                 vport->rxvlan_cfg.strip_tag2_en = true;
8103         }
8104         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8105         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8106
8107         ret = hclge_set_vlan_tx_offload_cfg(vport);
8108         if (ret)
8109                 return ret;
8110
8111         return hclge_set_vlan_rx_offload_cfg(vport);
8112 }
8113
8114 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8115 {
8116         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8117         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8118         struct hclge_desc desc;
8119         int status;
8120
8121         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8122         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8123         rx_req->ot_fst_vlan_type =
8124                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8125         rx_req->ot_sec_vlan_type =
8126                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8127         rx_req->in_fst_vlan_type =
8128                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8129         rx_req->in_sec_vlan_type =
8130                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8131
8132         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8133         if (status) {
8134                 dev_err(&hdev->pdev->dev,
8135                         "Send rxvlan protocol type command fail, ret =%d\n",
8136                         status);
8137                 return status;
8138         }
8139
8140         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8141
8142         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8143         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8144         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8145
8146         status = hclge_cmd_send(&hdev->hw, &desc, 1);
8147         if (status)
8148                 dev_err(&hdev->pdev->dev,
8149                         "Send txvlan protocol type command fail, ret =%d\n",
8150                         status);
8151
8152         return status;
8153 }
8154
8155 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8156 {
8157 #define HCLGE_DEF_VLAN_TYPE             0x8100
8158
8159         struct hnae3_handle *handle = &hdev->vport[0].nic;
8160         struct hclge_vport *vport;
8161         int ret;
8162         int i;
8163
8164         if (hdev->pdev->revision >= 0x21) {
8165                 /* for revision 0x21, vf vlan filter is per function */
8166                 for (i = 0; i < hdev->num_alloc_vport; i++) {
8167                         vport = &hdev->vport[i];
8168                         ret = hclge_set_vlan_filter_ctrl(hdev,
8169                                                          HCLGE_FILTER_TYPE_VF,
8170                                                          HCLGE_FILTER_FE_EGRESS,
8171                                                          true,
8172                                                          vport->vport_id);
8173                         if (ret)
8174                                 return ret;
8175                 }
8176
8177                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8178                                                  HCLGE_FILTER_FE_INGRESS, true,
8179                                                  0);
8180                 if (ret)
8181                         return ret;
8182         } else {
8183                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8184                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
8185                                                  true, 0);
8186                 if (ret)
8187                         return ret;
8188         }
8189
8190         handle->netdev_flags |= HNAE3_VLAN_FLTR;
8191
8192         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8193         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8194         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8195         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8196         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8197         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8198
8199         ret = hclge_set_vlan_protocol_type(hdev);
8200         if (ret)
8201                 return ret;
8202
8203         for (i = 0; i < hdev->num_alloc_vport; i++) {
8204                 u16 vlan_tag;
8205
8206                 vport = &hdev->vport[i];
8207                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8208
8209                 ret = hclge_vlan_offload_cfg(vport,
8210                                              vport->port_base_vlan_cfg.state,
8211                                              vlan_tag);
8212                 if (ret)
8213                         return ret;
8214         }
8215
8216         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8217 }
8218
8219 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8220                                        bool writen_to_tbl)
8221 {
8222         struct hclge_vport_vlan_cfg *vlan;
8223
8224         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8225         if (!vlan)
8226                 return;
8227
8228         vlan->hd_tbl_status = writen_to_tbl;
8229         vlan->vlan_id = vlan_id;
8230
8231         list_add_tail(&vlan->node, &vport->vlan_list);
8232 }
8233
8234 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8235 {
8236         struct hclge_vport_vlan_cfg *vlan, *tmp;
8237         struct hclge_dev *hdev = vport->back;
8238         int ret;
8239
8240         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8241                 if (!vlan->hd_tbl_status) {
8242                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8243                                                        vport->vport_id,
8244                                                        vlan->vlan_id, false);
8245                         if (ret) {
8246                                 dev_err(&hdev->pdev->dev,
8247                                         "restore vport vlan list failed, ret=%d\n",
8248                                         ret);
8249                                 return ret;
8250                         }
8251                 }
8252                 vlan->hd_tbl_status = true;
8253         }
8254
8255         return 0;
8256 }
8257
8258 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8259                                       bool is_write_tbl)
8260 {
8261         struct hclge_vport_vlan_cfg *vlan, *tmp;
8262         struct hclge_dev *hdev = vport->back;
8263
8264         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8265                 if (vlan->vlan_id == vlan_id) {
8266                         if (is_write_tbl && vlan->hd_tbl_status)
8267                                 hclge_set_vlan_filter_hw(hdev,
8268                                                          htons(ETH_P_8021Q),
8269                                                          vport->vport_id,
8270                                                          vlan_id,
8271                                                          true);
8272
8273                         list_del(&vlan->node);
8274                         kfree(vlan);
8275                         break;
8276                 }
8277         }
8278 }
8279
8280 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8281 {
8282         struct hclge_vport_vlan_cfg *vlan, *tmp;
8283         struct hclge_dev *hdev = vport->back;
8284
8285         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8286                 if (vlan->hd_tbl_status)
8287                         hclge_set_vlan_filter_hw(hdev,
8288                                                  htons(ETH_P_8021Q),
8289                                                  vport->vport_id,
8290                                                  vlan->vlan_id,
8291                                                  true);
8292
8293                 vlan->hd_tbl_status = false;
8294                 if (is_del_list) {
8295                         list_del(&vlan->node);
8296                         kfree(vlan);
8297                 }
8298         }
8299 }
8300
8301 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8302 {
8303         struct hclge_vport_vlan_cfg *vlan, *tmp;
8304         struct hclge_vport *vport;
8305         int i;
8306
8307         mutex_lock(&hdev->vport_cfg_mutex);
8308         for (i = 0; i < hdev->num_alloc_vport; i++) {
8309                 vport = &hdev->vport[i];
8310                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8311                         list_del(&vlan->node);
8312                         kfree(vlan);
8313                 }
8314         }
8315         mutex_unlock(&hdev->vport_cfg_mutex);
8316 }
8317
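/* hclge_restore_vlan_table - re-program the hardware VLAN filters for all
 * vports, e.g. after a reset has cleared them. Vports with a port-based
 * VLAN get that single tag restored; the others get every entry of their
 * software vlan_list that had previously been written to hardware.
 */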
8318 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8319 {
8320         struct hclge_vport *vport = hclge_get_vport(handle);
8321         struct hclge_vport_vlan_cfg *vlan, *tmp;
8322         struct hclge_dev *hdev = vport->back;
8323         u16 vlan_proto;
8324         u16 state, vlan_id;
8325         int i;
8326
8327         mutex_lock(&hdev->vport_cfg_mutex);
8328         for (i = 0; i < hdev->num_alloc_vport; i++) {
8329                 vport = &hdev->vport[i];
8330                 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8331                 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8332                 state = vport->port_base_vlan_cfg.state;
8333
8334                 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8335                         hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8336                                                  vport->vport_id, vlan_id,
8337                                                  false);
8338                         continue;
8339                 }
8340
8341                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8342                         int ret;
8343
8344                         if (!vlan->hd_tbl_status)
8345                                 continue;
8346                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8347                                                        vport->vport_id,
8348                                                        vlan->vlan_id, false);
8349                         if (ret)
8350                                 break;
8351                 }
8352         }
8353
8354         mutex_unlock(&hdev->vport_cfg_mutex);
8355 }
8356
8357 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8358 {
8359         struct hclge_vport *vport = hclge_get_vport(handle);
8360
8361         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8362                 vport->rxvlan_cfg.strip_tag1_en = false;
8363                 vport->rxvlan_cfg.strip_tag2_en = enable;
8364         } else {
8365                 vport->rxvlan_cfg.strip_tag1_en = enable;
8366                 vport->rxvlan_cfg.strip_tag2_en = true;
8367         }
8368         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8369         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8370         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8371
8372         return hclge_set_vlan_rx_offload_cfg(vport);
8373 }
8374
8375 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8376                                             u16 port_base_vlan_state,
8377                                             struct hclge_vlan_info *new_info,
8378                                             struct hclge_vlan_info *old_info)
8379 {
8380         struct hclge_dev *hdev = vport->back;
8381         int ret;
8382
8383         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8384                 hclge_rm_vport_all_vlan_table(vport, false);
8385                 return hclge_set_vlan_filter_hw(hdev,
8386                                                  htons(new_info->vlan_proto),
8387                                                  vport->vport_id,
8388                                                  new_info->vlan_tag,
8389                                                  false);
8390         }
8391
8392         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8393                                        vport->vport_id, old_info->vlan_tag,
8394                                        true);
8395         if (ret)
8396                 return ret;
8397
8398         return hclge_add_vport_all_vlan_table(vport);
8399 }
8400
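     /* Apply a port based vlan state change for a vport. For MODIFY, only the
      * vlan tag is swapped in hardware and the stored state is left as is;
      * for ENABLE/DISABLE, the vport vlan list entries are moved between the
      * hardware filter and software and the new state is recorded.
      */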
8401 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8402                                     struct hclge_vlan_info *vlan_info)
8403 {
8404         struct hnae3_handle *nic = &vport->nic;
8405         struct hclge_vlan_info *old_vlan_info;
8406         struct hclge_dev *hdev = vport->back;
8407         int ret;
8408
8409         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8410
8411         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8412         if (ret)
8413                 return ret;
8414
8415         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8416                 /* add new VLAN tag */
8417                 ret = hclge_set_vlan_filter_hw(hdev,
8418                                                htons(vlan_info->vlan_proto),
8419                                                vport->vport_id,
8420                                                vlan_info->vlan_tag,
8421                                                false);
8422                 if (ret)
8423                         return ret;
8424
8425                 /* remove old VLAN tag */
8426                 ret = hclge_set_vlan_filter_hw(hdev,
8427                                                htons(old_vlan_info->vlan_proto),
8428                                                vport->vport_id,
8429                                                old_vlan_info->vlan_tag,
8430                                                true);
8431                 if (ret)
8432                         return ret;
8433
8434                 goto update;
8435         }
8436
8437         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8438                                                old_vlan_info);
8439         if (ret)
8440                 return ret;
8441
8442         /* update state only when disabling/enabling port based VLAN */
8443         vport->port_base_vlan_cfg.state = state;
8444         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8445                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8446         else
8447                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8448
8449 update:
8450         vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8451         vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8452         vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
8453
8454         return 0;
8455 }
8456
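     /* Work out how a new VF vlan request changes the port based vlan state:
      *   currently disabled: vlan 0 -> NOCHANGE, non-zero vlan -> ENABLE
      *   currently enabled:  vlan 0 -> DISABLE, same tag -> NOCHANGE,
      *                       any other tag -> MODIFY
      */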
8457 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8458                                           enum hnae3_port_base_vlan_state state,
8459                                           u16 vlan)
8460 {
8461         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8462                 if (!vlan)
8463                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8464                 else
8465                         return HNAE3_PORT_BASE_VLAN_ENABLE;
8466         } else {
8467                 if (!vlan)
8468                         return HNAE3_PORT_BASE_VLAN_DISABLE;
8469                 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8470                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8471                 else
8472                         return HNAE3_PORT_BASE_VLAN_MODIFY;
8473         }
8474 }
8475
8476 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8477                                     u16 vlan, u8 qos, __be16 proto)
8478 {
8479         struct hclge_vport *vport = hclge_get_vport(handle);
8480         struct hclge_dev *hdev = vport->back;
8481         struct hclge_vlan_info vlan_info;
8482         u16 state;
8483         int ret;
8484
8485         if (hdev->pdev->revision == 0x20)
8486                 return -EOPNOTSUPP;
8487
8488         vport = hclge_get_vf_vport(hdev, vfid);
8489         if (!vport)
8490                 return -EINVAL;
8491
8492         /* qos is a 3-bit value, so it cannot be bigger than 7 */
8493         if (vlan > VLAN_N_VID - 1 || qos > 7)
8494                 return -EINVAL;
8495         if (proto != htons(ETH_P_8021Q))
8496                 return -EPROTONOSUPPORT;
8497
8498         state = hclge_get_port_base_vlan_state(vport,
8499                                                vport->port_base_vlan_cfg.state,
8500                                                vlan);
8501         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8502                 return 0;
8503
8504         vlan_info.vlan_tag = vlan;
8505         vlan_info.qos = qos;
8506         vlan_info.vlan_proto = ntohs(proto);
8507
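             /* If the VF is not alive, apply the new port based vlan to the
              * vport directly; otherwise notify the VF through the mailbox so
              * the update is completed via the mailbox flow.
              */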
8508         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8509                 return hclge_update_port_base_vlan_cfg(vport, state,
8510                                                        &vlan_info);
8511         } else {
8512                 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8513                                                         vport->vport_id, state,
8514                                                         vlan, qos,
8515                                                         ntohs(proto));
8516                 return ret;
8517         }
8518 }
8519
8520 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8521                           u16 vlan_id, bool is_kill)
8522 {
8523         struct hclge_vport *vport = hclge_get_vport(handle);
8524         struct hclge_dev *hdev = vport->back;
8525         bool writen_to_tbl = false;
8526         int ret = 0;
8527
8528         /* When the device is resetting, the firmware is unable to handle
8529          * the mailbox. Just record the vlan id and remove it after the
8530          * reset has finished.
8531          */
8532         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8533                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8534                 return -EBUSY;
8535         }
8536
8537         /* When port based vlan is enabled, the port based vlan is used as
8538          * the vlan filter entry. In this case the vlan filter table is not
8539          * updated when the user adds or removes a vlan; only the vport vlan
8540          * list is updated. The vlan ids in that list are written to the vlan
8541          * filter table only once port based vlan is disabled.
8542          */
8543         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8544                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8545                                                vlan_id, is_kill);
8546                 writen_to_tbl = true;
8547         }
8548
8549         if (!ret) {
8550                 if (is_kill)
8551                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8552                 else
8553                         hclge_add_vport_vlan_table(vport, vlan_id,
8554                                                    writen_to_tbl);
8555         } else if (is_kill) {
8556                 /* When removing the hw vlan filter fails, record the vlan
8557                  * id and try to remove it from hw later, to stay consistent
8558                  * with the stack.
8559                  */
8560                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8561         }
8562         return ret;
8563 }
8564
8565 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8566 {
8567 #define HCLGE_MAX_SYNC_COUNT    60
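     /* Bound how many previously failed vlan removals are retried in a single
      * invocation; anything left over is handled on a later pass.
      */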
8568
8569         int i, ret, sync_cnt = 0;
8570         u16 vlan_id;
8571
8572         /* retry the vlan removals that failed while the device was resetting */
8573         for (i = 0; i < hdev->num_alloc_vport; i++) {
8574                 struct hclge_vport *vport = &hdev->vport[i];
8575
8576                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8577                                          VLAN_N_VID);
8578                 while (vlan_id != VLAN_N_VID) {
8579                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8580                                                        vport->vport_id, vlan_id,
8581                                                        true);
8582                         if (ret && ret != -EINVAL)
8583                                 return;
8584
8585                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8586                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
8587
8588                         sync_cnt++;
8589                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8590                                 return;
8591
8592                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8593                                                  VLAN_N_VID);
8594                 }
8595         }
8596 }
8597
8598 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8599 {
8600         struct hclge_config_max_frm_size_cmd *req;
8601         struct hclge_desc desc;
8602
8603         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8604
8605         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8606         req->max_frm_size = cpu_to_le16(new_mps);
8607         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8608
8609         return hclge_cmd_send(&hdev->hw, &desc, 1);
8610 }
8611
8612 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8613 {
8614         struct hclge_vport *vport = hclge_get_vport(handle);
8615
8616         return hclge_set_vport_mtu(vport, new_mtu);
8617 }
8618
8619 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8620 {
8621         struct hclge_dev *hdev = vport->back;
8622         int i, max_frm_size, ret;
8623
8624         /* HW supports 2 layers of vlan */
8625         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
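             /* e.g. with ETH_HLEN (14), ETH_FCS_LEN (4) and VLAN_HLEN (4), an
              * MTU of 1500 gives a max frame size of 1500 + 14 + 4 + 8 = 1526
              */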
8626         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8627             max_frm_size > HCLGE_MAC_MAX_FRAME)
8628                 return -EINVAL;
8629
8630         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8631         mutex_lock(&hdev->vport_lock);
8632         /* VF's mps must fit within hdev->mps */
8633         if (vport->vport_id && max_frm_size > hdev->mps) {
8634                 mutex_unlock(&hdev->vport_lock);
8635                 return -EINVAL;
8636         } else if (vport->vport_id) {
8637                 vport->mps = max_frm_size;
8638                 mutex_unlock(&hdev->vport_lock);
8639                 return 0;
8640         }
8641
8642         /* PF's mps must not be less than any VF's mps */
8643         for (i = 1; i < hdev->num_alloc_vport; i++)
8644                 if (max_frm_size < hdev->vport[i].mps) {
8645                         mutex_unlock(&hdev->vport_lock);
8646                         return -EINVAL;
8647                 }
8648
8649         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8650
8651         ret = hclge_set_mac_mtu(hdev, max_frm_size);
8652         if (ret) {
8653                 dev_err(&hdev->pdev->dev,
8654                         "Change mtu fail, ret =%d\n", ret);
8655                 goto out;
8656         }
8657
8658         hdev->mps = max_frm_size;
8659         vport->mps = max_frm_size;
8660
8661         ret = hclge_buffer_alloc(hdev);
8662         if (ret)
8663                 dev_err(&hdev->pdev->dev,
8664                         "Allocate buffer fail, ret =%d\n", ret);
8665
8666 out:
8667         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8668         mutex_unlock(&hdev->vport_lock);
8669         return ret;
8670 }
8671
8672 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8673                                     bool enable)
8674 {
8675         struct hclge_reset_tqp_queue_cmd *req;
8676         struct hclge_desc desc;
8677         int ret;
8678
8679         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8680
8681         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8682         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8683         if (enable)
8684                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8685
8686         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8687         if (ret) {
8688                 dev_err(&hdev->pdev->dev,
8689                         "Send tqp reset cmd error, status =%d\n", ret);
8690                 return ret;
8691         }
8692
8693         return 0;
8694 }
8695
8696 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8697 {
8698         struct hclge_reset_tqp_queue_cmd *req;
8699         struct hclge_desc desc;
8700         int ret;
8701
8702         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8703
8704         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8705         req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8706
8707         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8708         if (ret) {
8709                 dev_err(&hdev->pdev->dev,
8710                         "Get reset status error, status =%d\n", ret);
8711                 return ret;
8712         }
8713
8714         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
8715 }
8716
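     /* Convert a queue id local to this handle into the global TQP index used
      * in firmware commands.
      */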
8717 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8718 {
8719         struct hnae3_queue *queue;
8720         struct hclge_tqp *tqp;
8721
8722         queue = handle->kinfo.tqp[queue_id];
8723         tqp = container_of(queue, struct hclge_tqp, q);
8724
8725         return tqp->index;
8726 }
8727
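     /* Reset a single TQP: disable the queue, assert the reset via the command
      * queue, poll the ready flag up to HCLGE_TQP_RESET_TRY_TIMES times about
      * 1 ms apart, then de-assert the reset.
      */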
8728 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8729 {
8730         struct hclge_vport *vport = hclge_get_vport(handle);
8731         struct hclge_dev *hdev = vport->back;
8732         int reset_try_times = 0;
8733         int reset_status;
8734         u16 queue_gid;
8735         int ret;
8736
8737         queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8738
8739         ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8740         if (ret) {
8741                 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8742                 return ret;
8743         }
8744
8745         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8746         if (ret) {
8747                 dev_err(&hdev->pdev->dev,
8748                         "Send reset tqp cmd fail, ret = %d\n", ret);
8749                 return ret;
8750         }
8751
8752         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8753                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8754                 if (reset_status)
8755                         break;
8756
8757                 /* Wait for tqp hw reset */
8758                 usleep_range(1000, 1200);
8759         }
8760
8761         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8762                 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8763                 return -ETIME;
8764         }
8765
8766         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8767         if (ret)
8768                 dev_err(&hdev->pdev->dev,
8769                         "Deassert the soft reset fail, ret = %d\n", ret);
8770
8771         return ret;
8772 }
8773
8774 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8775 {
8776         struct hclge_dev *hdev = vport->back;
8777         int reset_try_times = 0;
8778         int reset_status;
8779         u16 queue_gid;
8780         int ret;
8781
8782         queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8783
8784         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8785         if (ret) {
8786                 dev_warn(&hdev->pdev->dev,
8787                          "Send reset tqp cmd fail, ret = %d\n", ret);
8788                 return;
8789         }
8790
8791         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8792                 reset_status = hclge_get_reset_status(hdev, queue_gid);
8793                 if (reset_status)
8794                         break;
8795
8796                 /* Wait for tqp hw reset */
8797                 usleep_range(1000, 1200);
8798         }
8799
8800         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8801                 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8802                 return;
8803         }
8804
8805         ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8806         if (ret)
8807                 dev_warn(&hdev->pdev->dev,
8808                          "Deassert the soft reset fail, ret = %d\n", ret);
8809 }
8810
8811 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8812 {
8813         struct hclge_vport *vport = hclge_get_vport(handle);
8814         struct hclge_dev *hdev = vport->back;
8815
8816         return hdev->fw_version;
8817 }
8818
8819 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8820 {
8821         struct phy_device *phydev = hdev->hw.mac.phydev;
8822
8823         if (!phydev)
8824                 return;
8825
8826         phy_set_asym_pause(phydev, rx_en, tx_en);
8827 }
8828
8829 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8830 {
8831         int ret;
8832
8833         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8834                 return 0;
8835
8836         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8837         if (ret)
8838                 dev_err(&hdev->pdev->dev,
8839                         "configure pauseparam error, ret = %d.\n", ret);
8840
8841         return ret;
8842 }
8843
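     /* Resolve the MAC pause configuration from the PHY's local advertisement
      * and the link partner's pause abilities once autoneg has completed; for
      * example, both sides advertising symmetric pause enables rx and tx pause.
      */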
8844 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8845 {
8846         struct phy_device *phydev = hdev->hw.mac.phydev;
8847         u16 remote_advertising = 0;
8848         u16 local_advertising;
8849         u32 rx_pause, tx_pause;
8850         u8 flowctl;
8851
8852         if (!phydev->link || !phydev->autoneg)
8853                 return 0;
8854
8855         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8856
8857         if (phydev->pause)
8858                 remote_advertising = LPA_PAUSE_CAP;
8859
8860         if (phydev->asym_pause)
8861                 remote_advertising |= LPA_PAUSE_ASYM;
8862
8863         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8864                                            remote_advertising);
8865         tx_pause = flowctl & FLOW_CTRL_TX;
8866         rx_pause = flowctl & FLOW_CTRL_RX;
8867
8868         if (phydev->duplex == HCLGE_MAC_HALF) {
8869                 tx_pause = 0;
8870                 rx_pause = 0;
8871         }
8872
8873         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8874 }
8875
8876 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8877                                  u32 *rx_en, u32 *tx_en)
8878 {
8879         struct hclge_vport *vport = hclge_get_vport(handle);
8880         struct hclge_dev *hdev = vport->back;
8881         struct phy_device *phydev = hdev->hw.mac.phydev;
8882
8883         *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8884
8885         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8886                 *rx_en = 0;
8887                 *tx_en = 0;
8888                 return;
8889         }
8890
8891         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8892                 *rx_en = 1;
8893                 *tx_en = 0;
8894         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8895                 *tx_en = 1;
8896                 *rx_en = 0;
8897         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
8898                 *rx_en = 1;
8899                 *tx_en = 1;
8900         } else {
8901                 *rx_en = 0;
8902                 *tx_en = 0;
8903         }
8904 }
8905
8906 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8907                                          u32 rx_en, u32 tx_en)
8908 {
8909         if (rx_en && tx_en)
8910                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8911         else if (rx_en && !tx_en)
8912                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8913         else if (!rx_en && tx_en)
8914                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8915         else
8916                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8917
8918         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8919 }
8920
8921 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8922                                 u32 rx_en, u32 tx_en)
8923 {
8924         struct hclge_vport *vport = hclge_get_vport(handle);
8925         struct hclge_dev *hdev = vport->back;
8926         struct phy_device *phydev = hdev->hw.mac.phydev;
8927         u32 fc_autoneg;
8928
8929         if (phydev) {
8930                 fc_autoneg = hclge_get_autoneg(handle);
8931                 if (auto_neg != fc_autoneg) {
8932                         dev_info(&hdev->pdev->dev,
8933                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8934                         return -EOPNOTSUPP;
8935                 }
8936         }
8937
8938         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8939                 dev_info(&hdev->pdev->dev,
8940                          "Priority flow control enabled. Cannot set link flow control.\n");
8941                 return -EOPNOTSUPP;
8942         }
8943
8944         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8945
8946         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8947
8948         if (!auto_neg)
8949                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8950
8951         if (phydev)
8952                 return phy_start_aneg(phydev);
8953
8954         return -EOPNOTSUPP;
8955 }
8956
8957 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8958                                           u8 *auto_neg, u32 *speed, u8 *duplex)
8959 {
8960         struct hclge_vport *vport = hclge_get_vport(handle);
8961         struct hclge_dev *hdev = vport->back;
8962
8963         if (speed)
8964                 *speed = hdev->hw.mac.speed;
8965         if (duplex)
8966                 *duplex = hdev->hw.mac.duplex;
8967         if (auto_neg)
8968                 *auto_neg = hdev->hw.mac.autoneg;
8969 }
8970
8971 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8972                                  u8 *module_type)
8973 {
8974         struct hclge_vport *vport = hclge_get_vport(handle);
8975         struct hclge_dev *hdev = vport->back;
8976
8977         if (media_type)
8978                 *media_type = hdev->hw.mac.media_type;
8979
8980         if (module_type)
8981                 *module_type = hdev->hw.mac.module_type;
8982 }
8983
8984 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8985                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8986 {
8987         struct hclge_vport *vport = hclge_get_vport(handle);
8988         struct hclge_dev *hdev = vport->back;
8989         struct phy_device *phydev = hdev->hw.mac.phydev;
8990         int mdix_ctrl, mdix, is_resolved;
8991         unsigned int retval;
8992
8993         if (!phydev) {
8994                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8995                 *tp_mdix = ETH_TP_MDI_INVALID;
8996                 return;
8997         }
8998
8999         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9000
9001         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9002         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9003                                     HCLGE_PHY_MDIX_CTRL_S);
9004
9005         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9006         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9007         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9008
9009         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9010
9011         switch (mdix_ctrl) {
9012         case 0x0:
9013                 *tp_mdix_ctrl = ETH_TP_MDI;
9014                 break;
9015         case 0x1:
9016                 *tp_mdix_ctrl = ETH_TP_MDI_X;
9017                 break;
9018         case 0x3:
9019                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9020                 break;
9021         default:
9022                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9023                 break;
9024         }
9025
9026         if (!is_resolved)
9027                 *tp_mdix = ETH_TP_MDI_INVALID;
9028         else if (mdix)
9029                 *tp_mdix = ETH_TP_MDI_X;
9030         else
9031                 *tp_mdix = ETH_TP_MDI;
9032 }
9033
9034 static void hclge_info_show(struct hclge_dev *hdev)
9035 {
9036         struct device *dev = &hdev->pdev->dev;
9037
9038         dev_info(dev, "PF info begin:\n");
9039
9040         dev_info(dev, "Number of task queue pairs: %u\n", hdev->num_tqps);
9041         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9042         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9043         dev_info(dev, "Number of vports: %u\n", hdev->num_alloc_vport);
9044         dev_info(dev, "Number of vmdq vports: %u\n", hdev->num_vmdq_vport);
9045         dev_info(dev, "Number of VFs for this PF: %u\n", hdev->num_req_vfs);
9046         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9047         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9048         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9049         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9050         dev_info(dev, "This is %s PF\n",
9051                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9052         dev_info(dev, "DCB %s\n",
9053                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9054         dev_info(dev, "MQPRIO %s\n",
9055                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9056
9057         dev_info(dev, "PF info end.\n");
9058 }
9059
9060 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9061                                           struct hclge_vport *vport)
9062 {
9063         struct hnae3_client *client = vport->nic.client;
9064         struct hclge_dev *hdev = ae_dev->priv;
9065         int rst_cnt = hdev->rst_stats.reset_cnt;
9066         int ret;
9067
9068         ret = client->ops->init_instance(&vport->nic);
9069         if (ret)
9070                 return ret;
9071
9072         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
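             /* If a reset is in progress, or one completed while the client was
              * being initialized, the new instance may be inconsistent: tear it
              * down again and return -EBUSY.
              */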
9073         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9074             rst_cnt != hdev->rst_stats.reset_cnt) {
9075                 ret = -EBUSY;
9076                 goto init_nic_err;
9077         }
9078
9079         /* Enable nic hw error interrupts */
9080         ret = hclge_config_nic_hw_error(hdev, true);
9081         if (ret) {
9082                 dev_err(&ae_dev->pdev->dev,
9083                         "fail(%d) to enable hw error interrupts\n", ret);
9084                 goto init_nic_err;
9085         }
9086
9087         hnae3_set_client_init_flag(client, ae_dev, 1);
9088
9089         if (netif_msg_drv(&hdev->vport->nic))
9090                 hclge_info_show(hdev);
9091
9092         return ret;
9093
9094 init_nic_err:
9095         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9096         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9097                 msleep(HCLGE_WAIT_RESET_DONE);
9098
9099         client->ops->uninit_instance(&vport->nic, 0);
9100
9101         return ret;
9102 }
9103
9104 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9105                                            struct hclge_vport *vport)
9106 {
9107         struct hnae3_client *client = vport->roce.client;
9108         struct hclge_dev *hdev = ae_dev->priv;
9109         int rst_cnt;
9110         int ret;
9111
9112         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9113             !hdev->nic_client)
9114                 return 0;
9115
9116         client = hdev->roce_client;
9117         ret = hclge_init_roce_base_info(vport);
9118         if (ret)
9119                 return ret;
9120
9121         rst_cnt = hdev->rst_stats.reset_cnt;
9122         ret = client->ops->init_instance(&vport->roce);
9123         if (ret)
9124                 return ret;
9125
9126         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9127         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9128             rst_cnt != hdev->rst_stats.reset_cnt) {
9129                 ret = -EBUSY;
9130                 goto init_roce_err;
9131         }
9132
9133         /* Enable roce ras interrupts */
9134         ret = hclge_config_rocee_ras_interrupt(hdev, true);
9135         if (ret) {
9136                 dev_err(&ae_dev->pdev->dev,
9137                         "fail(%d) to enable roce ras interrupts\n", ret);
9138                 goto init_roce_err;
9139         }
9140
9141         hnae3_set_client_init_flag(client, ae_dev, 1);
9142
9143         return 0;
9144
9145 init_roce_err:
9146         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9147         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9148                 msleep(HCLGE_WAIT_RESET_DONE);
9149
9150         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
9151
9152         return ret;
9153 }
9154
9155 static int hclge_init_client_instance(struct hnae3_client *client,
9156                                       struct hnae3_ae_dev *ae_dev)
9157 {
9158         struct hclge_dev *hdev = ae_dev->priv;
9159         struct hclge_vport *vport;
9160         int i, ret;
9161
9162         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9163                 vport = &hdev->vport[i];
9164
9165                 switch (client->type) {
9166                 case HNAE3_CLIENT_KNIC:
9167                         hdev->nic_client = client;
9168                         vport->nic.client = client;
9169                         ret = hclge_init_nic_client_instance(ae_dev, vport);
9170                         if (ret)
9171                                 goto clear_nic;
9172
9173                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9174                         if (ret)
9175                                 goto clear_roce;
9176
9177                         break;
9178                 case HNAE3_CLIENT_ROCE:
9179                         if (hnae3_dev_roce_supported(hdev)) {
9180                                 hdev->roce_client = client;
9181                                 vport->roce.client = client;
9182                         }
9183
9184                         ret = hclge_init_roce_client_instance(ae_dev, vport);
9185                         if (ret)
9186                                 goto clear_roce;
9187
9188                         break;
9189                 default:
9190                         return -EINVAL;
9191                 }
9192         }
9193
9194         return 0;
9195
9196 clear_nic:
9197         hdev->nic_client = NULL;
9198         vport->nic.client = NULL;
9199         return ret;
9200 clear_roce:
9201         hdev->roce_client = NULL;
9202         vport->roce.client = NULL;
9203         return ret;
9204 }
9205
9206 static void hclge_uninit_client_instance(struct hnae3_client *client,
9207                                          struct hnae3_ae_dev *ae_dev)
9208 {
9209         struct hclge_dev *hdev = ae_dev->priv;
9210         struct hclge_vport *vport;
9211         int i;
9212
9213         for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9214                 vport = &hdev->vport[i];
9215                 if (hdev->roce_client) {
9216                         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9217                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9218                                 msleep(HCLGE_WAIT_RESET_DONE);
9219
9220                         hdev->roce_client->ops->uninit_instance(&vport->roce,
9221                                                                 0);
9222                         hdev->roce_client = NULL;
9223                         vport->roce.client = NULL;
9224                 }
9225                 if (client->type == HNAE3_CLIENT_ROCE)
9226                         return;
9227                 if (hdev->nic_client && client->ops->uninit_instance) {
9228                         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9229                         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9230                                 msleep(HCLGE_WAIT_RESET_DONE);
9231
9232                         client->ops->uninit_instance(&vport->nic, 0);
9233                         hdev->nic_client = NULL;
9234                         vport->nic.client = NULL;
9235                 }
9236         }
9237 }
9238
9239 static int hclge_pci_init(struct hclge_dev *hdev)
9240 {
9241         struct pci_dev *pdev = hdev->pdev;
9242         struct hclge_hw *hw;
9243         int ret;
9244
9245         ret = pci_enable_device(pdev);
9246         if (ret) {
9247                 dev_err(&pdev->dev, "failed to enable PCI device\n");
9248                 return ret;
9249         }
9250
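             /* prefer 64-bit DMA/coherent masks, fall back to 32-bit if the
              * platform cannot support them
              */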
9251         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9252         if (ret) {
9253                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9254                 if (ret) {
9255                         dev_err(&pdev->dev,
9256                                 "can't set consistent PCI DMA");
9257                         goto err_disable_device;
9258                 }
9259                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9260         }
9261
9262         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9263         if (ret) {
9264                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9265                 goto err_disable_device;
9266         }
9267
9268         pci_set_master(pdev);
9269         hw = &hdev->hw;
9270         hw->io_base = pcim_iomap(pdev, 2, 0);
9271         if (!hw->io_base) {
9272                 dev_err(&pdev->dev, "Can't map configuration register space\n");
9273                 ret = -ENOMEM;
9274                 goto err_clr_master;
9275         }
9276
9277         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9278
9279         return 0;
9280 err_clr_master:
9281         pci_clear_master(pdev);
9282         pci_release_regions(pdev);
9283 err_disable_device:
9284         pci_disable_device(pdev);
9285
9286         return ret;
9287 }
9288
9289 static void hclge_pci_uninit(struct hclge_dev *hdev)
9290 {
9291         struct pci_dev *pdev = hdev->pdev;
9292
9293         pcim_iounmap(pdev, hdev->hw.io_base);
9294         pci_free_irq_vectors(pdev);
9295         pci_clear_master(pdev);
9296         pci_release_mem_regions(pdev);
9297         pci_disable_device(pdev);
9298 }
9299
9300 static void hclge_state_init(struct hclge_dev *hdev)
9301 {
9302         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9303         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9304         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9305         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9306         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9307         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9308 }
9309
9310 static void hclge_state_uninit(struct hclge_dev *hdev)
9311 {
9312         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9313         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9314
9315         if (hdev->reset_timer.function)
9316                 del_timer_sync(&hdev->reset_timer);
9317         if (hdev->service_task.work.func)
9318                 cancel_delayed_work_sync(&hdev->service_task);
9319 }
9320
9321 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9322 {
9323 #define HCLGE_FLR_WAIT_MS       100
9324 #define HCLGE_FLR_WAIT_CNT      50
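     /* wait up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 50 * 100 ms = 5 s
      * for the reset task to bring the function down before FLR proceeds
      */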
9325         struct hclge_dev *hdev = ae_dev->priv;
9326         int cnt = 0;
9327
9328         clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9329         clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9330         set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9331         hclge_reset_event(hdev->pdev, NULL);
9332
9333         while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9334                cnt++ < HCLGE_FLR_WAIT_CNT)
9335                 msleep(HCLGE_FLR_WAIT_MS);
9336
9337         if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9338                 dev_err(&hdev->pdev->dev,
9339                         "flr wait down timeout: %d\n", cnt);
9340 }
9341
9342 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9343 {
9344         struct hclge_dev *hdev = ae_dev->priv;
9345
9346         set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9347 }
9348
9349 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9350 {
9351         u16 i;
9352
9353         for (i = 0; i < hdev->num_alloc_vport; i++) {
9354                 struct hclge_vport *vport = &hdev->vport[i];
9355                 int ret;
9356
9357                  /* Send cmd to clear VF's FUNC_RST_ING */
9358                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9359                 if (ret)
9360                         dev_warn(&hdev->pdev->dev,
9361                                  "clear vf(%u) rst failed %d!\n",
9362                                  vport->vport_id, ret);
9363         }
9364 }
9365
9366 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9367 {
9368         struct pci_dev *pdev = ae_dev->pdev;
9369         struct hclge_dev *hdev;
9370         int ret;
9371
9372         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9373         if (!hdev) {
9374                 ret = -ENOMEM;
9375                 goto out;
9376         }
9377
9378         hdev->pdev = pdev;
9379         hdev->ae_dev = ae_dev;
9380         hdev->reset_type = HNAE3_NONE_RESET;
9381         hdev->reset_level = HNAE3_FUNC_RESET;
9382         ae_dev->priv = hdev;
9383
9384         /* HW supports 2 layers of vlan */
9385         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
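             /* default max frame: ETH_FRAME_LEN (1514) + FCS (4) + two VLAN
              * tags (2 * 4) = 1526 bytes
              */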
9386
9387         mutex_init(&hdev->vport_lock);
9388         mutex_init(&hdev->vport_cfg_mutex);
9389         spin_lock_init(&hdev->fd_rule_lock);
9390
9391         ret = hclge_pci_init(hdev);
9392         if (ret) {
9393                 dev_err(&pdev->dev, "PCI init failed\n");
9394                 goto out;
9395         }
9396
9397         /* Initialize the firmware command queue */
9398         ret = hclge_cmd_queue_init(hdev);
9399         if (ret) {
9400                 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9401                 goto err_pci_uninit;
9402         }
9403
9404         /* Initialize the firmware command interface */
9405         ret = hclge_cmd_init(hdev);
9406         if (ret)
9407                 goto err_cmd_uninit;
9408
9409         ret = hclge_get_cap(hdev);
9410         if (ret) {
9411                 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9412                         ret);
9413                 goto err_cmd_uninit;
9414         }
9415
9416         ret = hclge_configure(hdev);
9417         if (ret) {
9418                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9419                 goto err_cmd_uninit;
9420         }
9421
9422         ret = hclge_init_msi(hdev);
9423         if (ret) {
9424                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9425                 goto err_cmd_uninit;
9426         }
9427
9428         ret = hclge_misc_irq_init(hdev);
9429         if (ret) {
9430                 dev_err(&pdev->dev,
9431                         "Misc IRQ(vector0) init error, ret = %d.\n",
9432                         ret);
9433                 goto err_msi_uninit;
9434         }
9435
9436         ret = hclge_alloc_tqps(hdev);
9437         if (ret) {
9438                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9439                 goto err_msi_irq_uninit;
9440         }
9441
9442         ret = hclge_alloc_vport(hdev);
9443         if (ret) {
9444                 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9445                 goto err_msi_irq_uninit;
9446         }
9447
9448         ret = hclge_map_tqp(hdev);
9449         if (ret) {
9450                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9451                 goto err_msi_irq_uninit;
9452         }
9453
9454         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9455                 ret = hclge_mac_mdio_config(hdev);
9456                 if (ret) {
9457                         dev_err(&hdev->pdev->dev,
9458                                 "mdio config fail ret=%d\n", ret);
9459                         goto err_msi_irq_uninit;
9460                 }
9461         }
9462
9463         ret = hclge_init_umv_space(hdev);
9464         if (ret) {
9465                 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9466                 goto err_mdiobus_unreg;
9467         }
9468
9469         ret = hclge_mac_init(hdev);
9470         if (ret) {
9471                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9472                 goto err_mdiobus_unreg;
9473         }
9474
9475         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9476         if (ret) {
9477                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9478                 goto err_mdiobus_unreg;
9479         }
9480
9481         ret = hclge_config_gro(hdev, true);
9482         if (ret)
9483                 goto err_mdiobus_unreg;
9484
9485         ret = hclge_init_vlan_config(hdev);
9486         if (ret) {
9487                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9488                 goto err_mdiobus_unreg;
9489         }
9490
9491         ret = hclge_tm_schd_init(hdev);
9492         if (ret) {
9493                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9494                 goto err_mdiobus_unreg;
9495         }
9496
9497         hclge_rss_init_cfg(hdev);
9498         ret = hclge_rss_init_hw(hdev);
9499         if (ret) {
9500                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9501                 goto err_mdiobus_unreg;
9502         }
9503
9504         ret = init_mgr_tbl(hdev);
9505         if (ret) {
9506                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9507                 goto err_mdiobus_unreg;
9508         }
9509
9510         ret = hclge_init_fd_config(hdev);
9511         if (ret) {
9512                 dev_err(&pdev->dev,
9513                         "fd table init fail, ret=%d\n", ret);
9514                 goto err_mdiobus_unreg;
9515         }
9516
9517         INIT_KFIFO(hdev->mac_tnl_log);
9518
9519         hclge_dcb_ops_set(hdev);
9520
9521         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9522         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9523
9524         /* Set up affinity after the service timer is set up, because
9525          * add_timer_on() is called from the affinity notify callback.
9526          */
9527         hclge_misc_affinity_setup(hdev);
9528
9529         hclge_clear_all_event_cause(hdev);
9530         hclge_clear_resetting_state(hdev);
9531
9532         /* Log and clear the hw errors that have already occurred */
9533         hclge_handle_all_hns_hw_errors(ae_dev);
9534
9535         /* Request a delayed reset for error recovery, since an immediate
9536          * global reset on this PF would affect the pending initialization of other PFs
9537          */
9538         if (ae_dev->hw_err_reset_req) {
9539                 enum hnae3_reset_type reset_level;
9540
9541                 reset_level = hclge_get_reset_level(ae_dev,
9542                                                     &ae_dev->hw_err_reset_req);
9543                 hclge_set_def_reset_request(ae_dev, reset_level);
9544                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9545         }
9546
9547         /* Enable MISC vector(vector0) */
9548         hclge_enable_vector(&hdev->misc_vector, true);
9549
9550         hclge_state_init(hdev);
9551         hdev->last_reset_time = jiffies;
9552
9553         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9554                  HCLGE_DRIVER_NAME);
9555
9556         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9557
9558         return 0;
9559
9560 err_mdiobus_unreg:
9561         if (hdev->hw.mac.phydev)
9562                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9563 err_msi_irq_uninit:
9564         hclge_misc_irq_uninit(hdev);
9565 err_msi_uninit:
9566         pci_free_irq_vectors(pdev);
9567 err_cmd_uninit:
9568         hclge_cmd_uninit(hdev);
9569 err_pci_uninit:
9570         pcim_iounmap(pdev, hdev->hw.io_base);
9571         pci_clear_master(pdev);
9572         pci_release_regions(pdev);
9573         pci_disable_device(pdev);
9574 out:
9575         return ret;
9576 }
9577
9578 static void hclge_stats_clear(struct hclge_dev *hdev)
9579 {
9580         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9581 }
9582
9583 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9584 {
9585         return hclge_config_switch_param(hdev, vf, enable,
9586                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
9587 }
9588
9589 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9590 {
9591         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9592                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
9593                                           enable, vf);
9594 }
9595
9596 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9597 {
9598         int ret;
9599
9600         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9601         if (ret) {
9602                 dev_err(&hdev->pdev->dev,
9603                         "Set vf %d mac spoof check %s failed, ret=%d\n",
9604                         vf, enable ? "on" : "off", ret);
9605                 return ret;
9606         }
9607
9608         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9609         if (ret)
9610                 dev_err(&hdev->pdev->dev,
9611                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
9612                         vf, enable ? "on" : "off", ret);
9613
9614         return ret;
9615 }
9616
9617 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9618                                  bool enable)
9619 {
9620         struct hclge_vport *vport = hclge_get_vport(handle);
9621         struct hclge_dev *hdev = vport->back;
9622         u32 new_spoofchk = enable ? 1 : 0;
9623         int ret;
9624
9625         if (hdev->pdev->revision == 0x20)
9626                 return -EOPNOTSUPP;
9627
9628         vport = hclge_get_vf_vport(hdev, vf);
9629         if (!vport)
9630                 return -EINVAL;
9631
9632         if (vport->vf_info.spoofchk == new_spoofchk)
9633                 return 0;
9634
9635         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9636                 dev_warn(&hdev->pdev->dev,
9637                          "vf %d vlan table is full, enabling spoof check may cause its packets to fail to send\n",
9638                          vf);
9639         else if (enable && hclge_is_umv_space_full(vport))
9640                 dev_warn(&hdev->pdev->dev,
9641                          "vf %d mac table is full, enabling spoof check may cause its packets to fail to send\n",
9642                          vf);
9643
9644         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9645         if (ret)
9646                 return ret;
9647
9648         vport->vf_info.spoofchk = new_spoofchk;
9649         return 0;
9650 }
9651
9652 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9653 {
9654         struct hclge_vport *vport = hdev->vport;
9655         int ret;
9656         int i;
9657
9658         if (hdev->pdev->revision == 0x20)
9659                 return 0;
9660
9661         /* resume the vf spoof check state after reset */
9662         for (i = 0; i < hdev->num_alloc_vport; i++) {
9663                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9664                                                vport->vf_info.spoofchk);
9665                 if (ret)
9666                         return ret;
9667
9668                 vport++;
9669         }
9670
9671         return 0;
9672 }
9673
9674 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9675 {
9676         struct hclge_vport *vport = hclge_get_vport(handle);
9677         struct hclge_dev *hdev = vport->back;
9678         u32 new_trusted = enable ? 1 : 0;
9679         bool en_bc_pmc;
9680         int ret;
9681
9682         vport = hclge_get_vf_vport(hdev, vf);
9683         if (!vport)
9684                 return -EINVAL;
9685
9686         if (vport->vf_info.trusted == new_trusted)
9687                 return 0;
9688
9689         /* Disable promisc mode for VF if it is not trusted any more. */
9690         if (!enable && vport->vf_info.promisc_enable) {
9691                 en_bc_pmc = hdev->pdev->revision != 0x20;
9692                 ret = hclge_set_vport_promisc_mode(vport, false, false,
9693                                                    en_bc_pmc);
9694                 if (ret)
9695                         return ret;
9696                 vport->vf_info.promisc_enable = 0;
9697                 hclge_inform_vf_promisc_info(vport);
9698         }
9699
9700         vport->vf_info.trusted = new_trusted;
9701
9702         return 0;
9703 }
9704
9705 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9706 {
9707         int ret;
9708         int vf;
9709
9710         /* reset vf rate to default value */
9711         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9712                 struct hclge_vport *vport = &hdev->vport[vf];
9713
9714                 vport->vf_info.max_tx_rate = 0;
9715                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9716                 if (ret)
9717                         dev_err(&hdev->pdev->dev,
9718                                 "vf%d failed to reset to default, ret=%d\n",
9719                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
9720         }
9721 }
9722
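     /* Validate the requested VF rates: min_tx_rate must be 0 (only a maximum
      * rate can be configured here) and max_tx_rate must not exceed the MAC's
      * maximum speed.
      */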
9723 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9724                                      int min_tx_rate, int max_tx_rate)
9725 {
9726         if (min_tx_rate != 0 ||
9727             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9728                 dev_err(&hdev->pdev->dev,
9729                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9730                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9731                 return -EINVAL;
9732         }
9733
9734         return 0;
9735 }
9736
9737 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9738                              int min_tx_rate, int max_tx_rate, bool force)
9739 {
9740         struct hclge_vport *vport = hclge_get_vport(handle);
9741         struct hclge_dev *hdev = vport->back;
9742         int ret;
9743
9744         ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9745         if (ret)
9746                 return ret;
9747
9748         vport = hclge_get_vf_vport(hdev, vf);
9749         if (!vport)
9750                 return -EINVAL;
9751
9752         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9753                 return 0;
9754
9755         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9756         if (ret)
9757                 return ret;
9758
9759         vport->vf_info.max_tx_rate = max_tx_rate;
9760
9761         return 0;
9762 }
9763
9764 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9765 {
9766         struct hnae3_handle *handle = &hdev->vport->nic;
9767         struct hclge_vport *vport;
9768         int ret;
9769         int vf;
9770
9771         /* resume the vf max_tx_rate after reset */
9772         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9773                 vport = hclge_get_vf_vport(hdev, vf);
9774                 if (!vport)
9775                         return -EINVAL;
9776
9777                 /* Zero means max rate; after a reset the firmware has
9778                  * already set it to max rate, so just continue.
9779                  */
9780                 if (!vport->vf_info.max_tx_rate)
9781                         continue;
9782
9783                 ret = hclge_set_vf_rate(handle, vf, 0,
9784                                         vport->vf_info.max_tx_rate, true);
9785                 if (ret) {
9786                         dev_err(&hdev->pdev->dev,
9787                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9788                                 vf, vport->vf_info.max_tx_rate, ret);
9789                         return ret;
9790                 }
9791         }
9792
9793         return 0;
9794 }
9795
9796 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9797 {
9798         struct hclge_vport *vport = hdev->vport;
9799         int i;
9800
9801         for (i = 0; i < hdev->num_alloc_vport; i++) {
9802                 hclge_vport_stop(vport);
9803                 vport++;
9804         }
9805 }
9806
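     /* Re-initialize hardware state after a reset. Unlike hclge_init_ae_dev(),
      * this reuses the already allocated vports, TQPs and IRQ vectors and only
      * reprograms the device (MAC, VLAN, TM, RSS, FD) before restoring per-VF
      * settings such as spoof check and max tx rate.
      */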
9807 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9808 {
9809         struct hclge_dev *hdev = ae_dev->priv;
9810         struct pci_dev *pdev = ae_dev->pdev;
9811         int ret;
9812
9813         set_bit(HCLGE_STATE_DOWN, &hdev->state);
9814
9815         hclge_stats_clear(hdev);
9816         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9817         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9818
9819         ret = hclge_cmd_init(hdev);
9820         if (ret) {
9821                 dev_err(&pdev->dev, "Cmd queue init failed\n");
9822                 return ret;
9823         }
9824
9825         ret = hclge_map_tqp(hdev);
9826         if (ret) {
9827                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9828                 return ret;
9829         }
9830
9831         hclge_reset_umv_space(hdev);
9832
9833         ret = hclge_mac_init(hdev);
9834         if (ret) {
9835                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9836                 return ret;
9837         }
9838
9839         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9840         if (ret) {
9841                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9842                 return ret;
9843         }
9844
9845         ret = hclge_config_gro(hdev, true);
9846         if (ret)
9847                 return ret;
9848
9849         ret = hclge_init_vlan_config(hdev);
9850         if (ret) {
9851                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9852                 return ret;
9853         }
9854
9855         ret = hclge_tm_init_hw(hdev, true);
9856         if (ret) {
9857                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9858                 return ret;
9859         }
9860
9861         ret = hclge_rss_init_hw(hdev);
9862         if (ret) {
9863                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9864                 return ret;
9865         }
9866
9867         ret = hclge_init_fd_config(hdev);
9868         if (ret) {
9869                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9870                 return ret;
9871         }
9872
9873         /* Log and clear the hw errors that have already occurred */
9874         hclge_handle_all_hns_hw_errors(ae_dev);
9875
9876         /* Re-enable the hw error interrupts because they are
9877          * disabled during a global reset.
9878          */
9879         ret = hclge_config_nic_hw_error(hdev, true);
9880         if (ret) {
9881                 dev_err(&pdev->dev,
9882                         "fail(%d) to re-enable NIC hw error interrupts\n",
9883                         ret);
9884                 return ret;
9885         }
9886
9887         if (hdev->roce_client) {
9888                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9889                 if (ret) {
9890                         dev_err(&pdev->dev,
9891                                 "fail(%d) to re-enable roce ras interrupts\n",
9892                                 ret);
9893                         return ret;
9894                 }
9895         }
9896
9897         hclge_reset_vport_state(hdev);
9898         ret = hclge_reset_vport_spoofchk(hdev);
9899         if (ret)
9900                 return ret;
9901
9902         ret = hclge_resume_vf_rate(hdev);
9903         if (ret)
9904                 return ret;
9905
9906         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
9907                  HCLGE_DRIVER_NAME);
9908
9909         return 0;
9910 }
9911
9912 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9913 {
9914         struct hclge_dev *hdev = ae_dev->priv;
9915         struct hclge_mac *mac = &hdev->hw.mac;
9916
9917         hclge_reset_vf_rate(hdev);
9918         hclge_misc_affinity_teardown(hdev);
9919         hclge_state_uninit(hdev);
9920
9921         if (mac->phydev)
9922                 mdiobus_unregister(mac->mdio_bus);
9923
9924         hclge_uninit_umv_space(hdev);
9925
9926         /* Disable MISC vector (vector0) */
9927         hclge_enable_vector(&hdev->misc_vector, false);
9928         synchronize_irq(hdev->misc_vector.vector_irq);
9929
9930         /* Disable all hw interrupts */
9931         hclge_config_mac_tnl_int(hdev, false);
9932         hclge_config_nic_hw_error(hdev, false);
9933         hclge_config_rocee_ras_interrupt(hdev, false);
9934
9935         hclge_cmd_uninit(hdev);
9936         hclge_misc_irq_uninit(hdev);
9937         hclge_pci_uninit(hdev);
9938         mutex_destroy(&hdev->vport_lock);
9939         hclge_uninit_vport_mac_table(hdev);
9940         hclge_uninit_vport_vlan_table(hdev);
9941         mutex_destroy(&hdev->vport_cfg_mutex);
9942         ae_dev->priv = NULL;
9943 }
9944
9945 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9946 {
9947         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9948         struct hclge_vport *vport = hclge_get_vport(handle);
9949         struct hclge_dev *hdev = vport->back;
9950
9951         return min_t(u32, hdev->rss_size_max,
9952                      vport->alloc_tqps / kinfo->num_tc);
9953 }
9954
9955 static void hclge_get_channels(struct hnae3_handle *handle,
9956                                struct ethtool_channels *ch)
9957 {
9958         ch->max_combined = hclge_get_max_channels(handle);
9959         ch->other_count = 1;
9960         ch->max_other = 1;
9961         ch->combined_count = handle->kinfo.rss_size;
9962 }
9963
9964 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9965                                         u16 *alloc_tqps, u16 *max_rss_size)
9966 {
9967         struct hclge_vport *vport = hclge_get_vport(handle);
9968         struct hclge_dev *hdev = vport->back;
9969
9970         *alloc_tqps = vport->alloc_tqps;
9971         *max_rss_size = hdev->rss_size_max;
9972 }
9973
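/* Change the number of enabled TQPs (ethtool -L): record the requested RSS
 * size, let the TM module remap the vport's queues, reprogram the RSS TC
 * mode with the queue size rounded up to a power of two, and rebuild the
 * RSS indirection table unless the user has configured one (rxfh_configured).
 */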
9974 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9975                               bool rxfh_configured)
9976 {
9977         struct hclge_vport *vport = hclge_get_vport(handle);
9978         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9979         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9980         struct hclge_dev *hdev = vport->back;
9981         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9982         u16 cur_rss_size = kinfo->rss_size;
9983         u16 cur_tqps = kinfo->num_tqps;
9984         u16 tc_valid[HCLGE_MAX_TC_NUM];
9985         u16 roundup_size;
9986         u32 *rss_indir;
9987         unsigned int i;
9988         int ret;
9989
9990         kinfo->req_rss_size = new_tqps_num;
9991
9992         ret = hclge_tm_vport_map_update(hdev);
9993         if (ret) {
9994                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9995                 return ret;
9996         }
9997
9998         roundup_size = roundup_pow_of_two(kinfo->rss_size);
9999         roundup_size = ilog2(roundup_size);
10000         /* Set the RSS TC mode according to the new RSS size */
10001         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10002                 tc_valid[i] = 0;
10003
10004                 if (!(hdev->hw_tc_map & BIT(i)))
10005                         continue;
10006
10007                 tc_valid[i] = 1;
10008                 tc_size[i] = roundup_size;
10009                 tc_offset[i] = kinfo->rss_size * i;
10010         }
10011         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10012         if (ret)
10013                 return ret;
10014
10015         /* RSS indirection table has been configured by user */
10016         if (rxfh_configured)
10017                 goto out;
10018
10019         /* Reinitialize the RSS indirection table according to the new RSS size */
10020         rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10021         if (!rss_indir)
10022                 return -ENOMEM;
10023
10024         for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10025                 rss_indir[i] = i % kinfo->rss_size;
10026
10027         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10028         if (ret)
10029                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10030                         ret);
10031
10032         kfree(rss_indir);
10033
10034 out:
10035         if (!ret)
10036                 dev_info(&hdev->pdev->dev,
10037                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
10038                          cur_rss_size, kinfo->rss_size,
10039                          cur_tqps, kinfo->rss_size * kinfo->num_tc);
10040
10041         return ret;
10042 }
10043
10044 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10045                               u32 *regs_num_64_bit)
10046 {
10047         struct hclge_desc desc;
10048         u32 total_num;
10049         int ret;
10050
10051         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10052         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10053         if (ret) {
10054                 dev_err(&hdev->pdev->dev,
10055                         "Query register number cmd failed, ret = %d.\n", ret);
10056                 return ret;
10057         }
10058
10059         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10060         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10061
10062         total_num = *regs_num_32_bit + *regs_num_64_bit;
10063         if (!total_num)
10064                 return -EINVAL;
10065
10066         return 0;
10067 }
10068
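/* Read back the 32 bit registers reported by firmware. Every descriptor
 * carries HCLGE_32_BIT_REG_RTN_DATANUM words of data, but the first one
 * yields HCLGE_32_BIT_DESC_NODATA_LEN fewer register values, so cmd_num is
 * rounded up to cover the shortfall. The 64 bit variant below uses the
 * same scheme with 64 bit words.
 */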
10069 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10070                                  void *data)
10071 {
10072 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10073 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10074
10075         struct hclge_desc *desc;
10076         u32 *reg_val = data;
10077         __le32 *desc_data;
10078         int nodata_num;
10079         int cmd_num;
10080         int i, k, n;
10081         int ret;
10082
10083         if (regs_num == 0)
10084                 return 0;
10085
10086         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10087         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10088                                HCLGE_32_BIT_REG_RTN_DATANUM);
10089         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10090         if (!desc)
10091                 return -ENOMEM;
10092
10093         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10094         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10095         if (ret) {
10096                 dev_err(&hdev->pdev->dev,
10097                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
10098                 kfree(desc);
10099                 return ret;
10100         }
10101
10102         for (i = 0; i < cmd_num; i++) {
10103                 if (i == 0) {
10104                         desc_data = (__le32 *)(&desc[i].data[0]);
10105                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10106                 } else {
10107                         desc_data = (__le32 *)(&desc[i]);
10108                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
10109                 }
10110                 for (k = 0; k < n; k++) {
10111                         *reg_val++ = le32_to_cpu(*desc_data++);
10112
10113                         regs_num--;
10114                         if (!regs_num)
10115                                 break;
10116                 }
10117         }
10118
10119         kfree(desc);
10120         return 0;
10121 }
10122
10123 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10124                                  void *data)
10125 {
10126 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10127 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10128
10129         struct hclge_desc *desc;
10130         u64 *reg_val = data;
10131         __le64 *desc_data;
10132         int nodata_len;
10133         int cmd_num;
10134         int i, k, n;
10135         int ret;
10136
10137         if (regs_num == 0)
10138                 return 0;
10139
10140         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10141         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10142                                HCLGE_64_BIT_REG_RTN_DATANUM);
10143         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10144         if (!desc)
10145                 return -ENOMEM;
10146
10147         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10148         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10149         if (ret) {
10150                 dev_err(&hdev->pdev->dev,
10151                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
10152                 kfree(desc);
10153                 return ret;
10154         }
10155
10156         for (i = 0; i < cmd_num; i++) {
10157                 if (i == 0) {
10158                         desc_data = (__le64 *)(&desc[i].data[0]);
10159                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10160                 } else {
10161                         desc_data = (__le64 *)(&desc[i]);
10162                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
10163                 }
10164                 for (k = 0; k < n; k++) {
10165                         *reg_val++ = le64_to_cpu(*desc_data++);
10166
10167                         regs_num--;
10168                         if (!regs_num)
10169                                 break;
10170                 }
10171         }
10172
10173         kfree(desc);
10174         return 0;
10175 }
10176
10177 #define MAX_SEPARATE_NUM        4
10178 #define SEPARATOR_VALUE         0xFDFCFBFA
10179 #define REG_NUM_PER_LINE        4
10180 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
10181 #define REG_SEPARATOR_LINE      1
10182 #define REG_NUM_REMAIN_MASK     3
10183 #define BD_LIST_MAX_NUM         30
10184
10185 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10186 {
10187         /* prepare 4 commands to query DFX BD number */
10188         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10189         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10190         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10191         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10192         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10193         desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10194         hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10195
10196         return hclge_cmd_send(&hdev->hw, desc, 4);
10197 }
10198
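/* Query how many BDs firmware uses for each DFX register type. The four
 * descriptors filled by hclge_query_bd_num_cmd_send() form one flat array
 * of counts; hclge_dfx_bd_offset_list[] gives each type's position in that
 * array, which is split here into a descriptor index and a data index.
 */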
10199 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10200                                     int *bd_num_list,
10201                                     u32 type_num)
10202 {
10203 #define HCLGE_DFX_REG_BD_NUM    4
10204
10205         u32 entries_per_desc, desc_index, index, offset, i;
10206         struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10207         int ret;
10208
10209         ret = hclge_query_bd_num_cmd_send(hdev, desc);
10210         if (ret) {
10211                 dev_err(&hdev->pdev->dev,
10212                         "Get dfx bd num fail, status is %d.\n", ret);
10213                 return ret;
10214         }
10215
10216         entries_per_desc = ARRAY_SIZE(desc[0].data);
10217         for (i = 0; i < type_num; i++) {
10218                 offset = hclge_dfx_bd_offset_list[i];
10219                 index = offset % entries_per_desc;
10220                 desc_index = offset / entries_per_desc;
10221                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10222         }
10223
10224         return ret;
10225 }
10226
10227 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10228                                   struct hclge_desc *desc_src, int bd_num,
10229                                   enum hclge_opcode_type cmd)
10230 {
10231         struct hclge_desc *desc = desc_src;
10232         int i, ret;
10233
10234         hclge_cmd_setup_basic_desc(desc, cmd, true);
10235         for (i = 0; i < bd_num - 1; i++) {
10236                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10237                 desc++;
10238                 hclge_cmd_setup_basic_desc(desc, cmd, true);
10239         }
10240
10241         desc = desc_src;
10242         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10243         if (ret)
10244                 dev_err(&hdev->pdev->dev,
10245                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10246                         cmd, ret);
10247
10248         return ret;
10249 }
10250
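/* Copy the DFX register values out of the descriptors and pad the block
 * with SEPARATOR_VALUE words so it ends on a REG_NUM_PER_LINE boundary,
 * matching the length computed in hclge_get_dfx_reg_len(). Returns the
 * number of u32 words written.
 */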
10251 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10252                                     void *data)
10253 {
10254         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10255         struct hclge_desc *desc = desc_src;
10256         u32 *reg = data;
10257
10258         entries_per_desc = ARRAY_SIZE(desc->data);
10259         reg_num = entries_per_desc * bd_num;
10260         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10261         for (i = 0; i < reg_num; i++) {
10262                 index = i % entries_per_desc;
10263                 desc_index = i / entries_per_desc;
10264                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10265         }
10266         for (i = 0; i < separator_num; i++)
10267                 *reg++ = SEPARATOR_VALUE;
10268
10269         return reg_num + separator_num;
10270 }
10271
10272 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10273 {
10274         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10275         int data_len_per_desc, data_len, bd_num, i;
10276         int bd_num_list[BD_LIST_MAX_NUM];
10277         int ret;
10278
10279         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10280         if (ret) {
10281                 dev_err(&hdev->pdev->dev,
10282                         "Get dfx reg bd num fail, status is %d.\n", ret);
10283                 return ret;
10284         }
10285
10286         data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
10287         *len = 0;
10288         for (i = 0; i < dfx_reg_type_num; i++) {
10289                 bd_num = bd_num_list[i];
10290                 data_len = data_len_per_desc * bd_num;
10291                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10292         }
10293
10294         return ret;
10295 }
10296
10297 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10298 {
10299         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10300         int bd_num, bd_num_max, buf_len, i;
10301         int bd_num_list[BD_LIST_MAX_NUM];
10302         struct hclge_desc *desc_src;
10303         u32 *reg = data;
10304         int ret;
10305
10306         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10307         if (ret) {
10308                 dev_err(&hdev->pdev->dev,
10309                         "Get dfx reg bd num fail, status is %d.\n", ret);
10310                 return ret;
10311         }
10312
10313         bd_num_max = bd_num_list[0];
10314         for (i = 1; i < dfx_reg_type_num; i++)
10315                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10316
10317         buf_len = sizeof(*desc_src) * bd_num_max;
10318         desc_src = kzalloc(buf_len, GFP_KERNEL);
10319         if (!desc_src) {
10320                 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10321                 return -ENOMEM;
10322         }
10323
10324         for (i = 0; i < dfx_reg_type_num; i++) {
10325                 bd_num = bd_num_list[i];
10326                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10327                                              hclge_dfx_reg_opcode_list[i]);
10328                 if (ret) {
10329                         dev_err(&hdev->pdev->dev,
10330                                 "Get dfx reg fail, status is %d.\n", ret);
10331                         break;
10332                 }
10333
10334                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
10335         }
10336
10337         kfree(desc_src);
10338         return ret;
10339 }
10340
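/* Dump the registers read directly from the PF's PCIe register space: the
 * cmdq and common register groups, the per-TQP ring registers (spaced
 * HCLGE_RING_REG_OFFSET apart) and the per-vector interrupt registers
 * (spaced HCLGE_RING_INT_REG_OFFSET apart), each group padded with
 * SEPARATOR_VALUE words. Returns the total number of u32 words written.
 */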
10341 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10342                               struct hnae3_knic_private_info *kinfo)
10343 {
10344 #define HCLGE_RING_REG_OFFSET           0x200
10345 #define HCLGE_RING_INT_REG_OFFSET       0x4
10346
10347         int i, j, reg_num, separator_num;
10348         int data_num_sum;
10349         u32 *reg = data;
10350
10351         /* fetch per-PF register values from the PF PCIe register space */
10352         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10353         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10354         for (i = 0; i < reg_num; i++)
10355                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10356         for (i = 0; i < separator_num; i++)
10357                 *reg++ = SEPARATOR_VALUE;
10358         data_num_sum = reg_num + separator_num;
10359
10360         reg_num = ARRAY_SIZE(common_reg_addr_list);
10361         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10362         for (i = 0; i < reg_num; i++)
10363                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10364         for (i = 0; i < separator_num; i++)
10365                 *reg++ = SEPARATOR_VALUE;
10366         data_num_sum += reg_num + separator_num;
10367
10368         reg_num = ARRAY_SIZE(ring_reg_addr_list);
10369         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10370         for (j = 0; j < kinfo->num_tqps; j++) {
10371                 for (i = 0; i < reg_num; i++)
10372                         *reg++ = hclge_read_dev(&hdev->hw,
10373                                                 ring_reg_addr_list[i] +
10374                                                 HCLGE_RING_REG_OFFSET * j);
10375                 for (i = 0; i < separator_num; i++)
10376                         *reg++ = SEPARATOR_VALUE;
10377         }
10378         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10379
10380         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10381         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10382         for (j = 0; j < hdev->num_msi_used - 1; j++) {
10383                 for (i = 0; i < reg_num; i++)
10384                         *reg++ = hclge_read_dev(&hdev->hw,
10385                                                 tqp_intr_reg_addr_list[i] +
10386                                                 HCLGE_RING_INT_REG_OFFSET * j);
10387                 for (i = 0; i < separator_num; i++)
10388                         *reg++ = SEPARATOR_VALUE;
10389         }
10390         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10391
10392         return data_num_sum;
10393 }
10394
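/* The buffer length reported to ethtool must match what hclge_get_regs()
 * emits: one separator-padded block per register group (cmdq, common,
 * per-TQP ring, per-vector interrupt, 32 bit and 64 bit firmware registers)
 * plus the DFX register length queried from firmware.
 */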
10395 static int hclge_get_regs_len(struct hnae3_handle *handle)
10396 {
10397         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10398         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10399         struct hclge_vport *vport = hclge_get_vport(handle);
10400         struct hclge_dev *hdev = vport->back;
10401         u32 regs_num_32_bit, regs_num_64_bit;
10402         int regs_lines_32_bit, regs_lines_64_bit, dfx_regs_len;
10403         int ret;
10404
10405         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10406         if (ret) {
10407                 dev_err(&hdev->pdev->dev,
10408                         "Get register number failed, ret = %d.\n", ret);
10409                 return ret;
10410         }
10411
10412         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10413         if (ret) {
10414                 dev_err(&hdev->pdev->dev,
10415                         "Get dfx reg len failed, ret = %d.\n", ret);
10416                 return ret;
10417         }
10418
10419         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10420                 REG_SEPARATOR_LINE;
10421         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10422                 REG_SEPARATOR_LINE;
10423         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10424                 REG_SEPARATOR_LINE;
10425         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10426                 REG_SEPARATOR_LINE;
10427         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10428                 REG_SEPARATOR_LINE;
10429         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10430                 REG_SEPARATOR_LINE;
10431
10432         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10433                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10434                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10435 }
10436
10437 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10438                            void *data)
10439 {
10440         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10441         struct hclge_vport *vport = hclge_get_vport(handle);
10442         struct hclge_dev *hdev = vport->back;
10443         u32 regs_num_32_bit, regs_num_64_bit;
10444         int i, reg_num, separator_num, ret;
10445         u32 *reg = data;
10446
10447         *version = hdev->fw_version;
10448
10449         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10450         if (ret) {
10451                 dev_err(&hdev->pdev->dev,
10452                         "Get register number failed, ret = %d.\n", ret);
10453                 return;
10454         }
10455
10456         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10457
10458         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10459         if (ret) {
10460                 dev_err(&hdev->pdev->dev,
10461                         "Get 32 bit register failed, ret = %d.\n", ret);
10462                 return;
10463         }
10464         reg_num = regs_num_32_bit;
10465         reg += reg_num;
10466         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10467         for (i = 0; i < separator_num; i++)
10468                 *reg++ = SEPARATOR_VALUE;
10469
10470         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10471         if (ret) {
10472                 dev_err(&hdev->pdev->dev,
10473                         "Get 64 bit register failed, ret = %d.\n", ret);
10474                 return;
10475         }
10476         reg_num = regs_num_64_bit * 2;
10477         reg += reg_num;
10478         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10479         for (i = 0; i < separator_num; i++)
10480                 *reg++ = SEPARATOR_VALUE;
10481
10482         ret = hclge_get_dfx_reg(hdev, reg);
10483         if (ret)
10484                 dev_err(&hdev->pdev->dev,
10485                         "Get dfx register failed, ret = %d.\n", ret);
10486 }
10487
10488 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10489 {
10490         struct hclge_set_led_state_cmd *req;
10491         struct hclge_desc desc;
10492         int ret;
10493
10494         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10495
10496         req = (struct hclge_set_led_state_cmd *)desc.data;
10497         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10498                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10499
10500         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10501         if (ret)
10502                 dev_err(&hdev->pdev->dev,
10503                         "Send set led state cmd error, ret =%d\n", ret);
10504
10505         return ret;
10506 }
10507
10508 enum hclge_led_status {
10509         HCLGE_LED_OFF,
10510         HCLGE_LED_ON,
10511         HCLGE_LED_NO_CHANGE = 0xFF,
10512 };
10513
10514 static int hclge_set_led_id(struct hnae3_handle *handle,
10515                             enum ethtool_phys_id_state status)
10516 {
10517         struct hclge_vport *vport = hclge_get_vport(handle);
10518         struct hclge_dev *hdev = vport->back;
10519
10520         switch (status) {
10521         case ETHTOOL_ID_ACTIVE:
10522                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10523         case ETHTOOL_ID_INACTIVE:
10524                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10525         default:
10526                 return -EINVAL;
10527         }
10528 }
10529
10530 static void hclge_get_link_mode(struct hnae3_handle *handle,
10531                                 unsigned long *supported,
10532                                 unsigned long *advertising)
10533 {
10534         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10535         struct hclge_vport *vport = hclge_get_vport(handle);
10536         struct hclge_dev *hdev = vport->back;
10537         unsigned int idx = 0;
10538
10539         for (; idx < size; idx++) {
10540                 supported[idx] = hdev->hw.mac.supported[idx];
10541                 advertising[idx] = hdev->hw.mac.advertising[idx];
10542         }
10543 }
10544
10545 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10546 {
10547         struct hclge_vport *vport = hclge_get_vport(handle);
10548         struct hclge_dev *hdev = vport->back;
10549
10550         return hclge_config_gro(hdev, enable);
10551 }
10552
10553 static const struct hnae3_ae_ops hclge_ops = {
10554         .init_ae_dev = hclge_init_ae_dev,
10555         .uninit_ae_dev = hclge_uninit_ae_dev,
10556         .flr_prepare = hclge_flr_prepare,
10557         .flr_done = hclge_flr_done,
10558         .init_client_instance = hclge_init_client_instance,
10559         .uninit_client_instance = hclge_uninit_client_instance,
10560         .map_ring_to_vector = hclge_map_ring_to_vector,
10561         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10562         .get_vector = hclge_get_vector,
10563         .put_vector = hclge_put_vector,
10564         .set_promisc_mode = hclge_set_promisc_mode,
10565         .set_loopback = hclge_set_loopback,
10566         .start = hclge_ae_start,
10567         .stop = hclge_ae_stop,
10568         .client_start = hclge_client_start,
10569         .client_stop = hclge_client_stop,
10570         .get_status = hclge_get_status,
10571         .get_ksettings_an_result = hclge_get_ksettings_an_result,
10572         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10573         .get_media_type = hclge_get_media_type,
10574         .check_port_speed = hclge_check_port_speed,
10575         .get_fec = hclge_get_fec,
10576         .set_fec = hclge_set_fec,
10577         .get_rss_key_size = hclge_get_rss_key_size,
10578         .get_rss_indir_size = hclge_get_rss_indir_size,
10579         .get_rss = hclge_get_rss,
10580         .set_rss = hclge_set_rss,
10581         .set_rss_tuple = hclge_set_rss_tuple,
10582         .get_rss_tuple = hclge_get_rss_tuple,
10583         .get_tc_size = hclge_get_tc_size,
10584         .get_mac_addr = hclge_get_mac_addr,
10585         .set_mac_addr = hclge_set_mac_addr,
10586         .do_ioctl = hclge_do_ioctl,
10587         .add_uc_addr = hclge_add_uc_addr,
10588         .rm_uc_addr = hclge_rm_uc_addr,
10589         .add_mc_addr = hclge_add_mc_addr,
10590         .rm_mc_addr = hclge_rm_mc_addr,
10591         .set_autoneg = hclge_set_autoneg,
10592         .get_autoneg = hclge_get_autoneg,
10593         .restart_autoneg = hclge_restart_autoneg,
10594         .halt_autoneg = hclge_halt_autoneg,
10595         .get_pauseparam = hclge_get_pauseparam,
10596         .set_pauseparam = hclge_set_pauseparam,
10597         .set_mtu = hclge_set_mtu,
10598         .reset_queue = hclge_reset_tqp,
10599         .get_stats = hclge_get_stats,
10600         .get_mac_stats = hclge_get_mac_stat,
10601         .update_stats = hclge_update_stats,
10602         .get_strings = hclge_get_strings,
10603         .get_sset_count = hclge_get_sset_count,
10604         .get_fw_version = hclge_get_fw_version,
10605         .get_mdix_mode = hclge_get_mdix_mode,
10606         .enable_vlan_filter = hclge_enable_vlan_filter,
10607         .set_vlan_filter = hclge_set_vlan_filter,
10608         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10609         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10610         .reset_event = hclge_reset_event,
10611         .get_reset_level = hclge_get_reset_level,
10612         .set_default_reset_request = hclge_set_def_reset_request,
10613         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10614         .set_channels = hclge_set_channels,
10615         .get_channels = hclge_get_channels,
10616         .get_regs_len = hclge_get_regs_len,
10617         .get_regs = hclge_get_regs,
10618         .set_led_id = hclge_set_led_id,
10619         .get_link_mode = hclge_get_link_mode,
10620         .add_fd_entry = hclge_add_fd_entry,
10621         .del_fd_entry = hclge_del_fd_entry,
10622         .del_all_fd_entries = hclge_del_all_fd_entries,
10623         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10624         .get_fd_rule_info = hclge_get_fd_rule_info,
10625         .get_fd_all_rules = hclge_get_all_rules,
10626         .restore_fd_rules = hclge_restore_fd_entries,
10627         .enable_fd = hclge_enable_fd,
10628         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10629         .dbg_run_cmd = hclge_dbg_run_cmd,
10630         .handle_hw_ras_error = hclge_handle_hw_ras_error,
10631         .get_hw_reset_stat = hclge_get_hw_reset_stat,
10632         .ae_dev_resetting = hclge_ae_dev_resetting,
10633         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10634         .set_gro_en = hclge_gro_en,
10635         .get_global_queue_id = hclge_covert_handle_qid_global,
10636         .set_timer_task = hclge_set_timer_task,
10637         .mac_connect_phy = hclge_mac_connect_phy,
10638         .mac_disconnect_phy = hclge_mac_disconnect_phy,
10639         .restore_vlan_table = hclge_restore_vlan_table,
10640         .get_vf_config = hclge_get_vf_config,
10641         .set_vf_link_state = hclge_set_vf_link_state,
10642         .set_vf_spoofchk = hclge_set_vf_spoofchk,
10643         .set_vf_trust = hclge_set_vf_trust,
10644         .set_vf_rate = hclge_set_vf_rate,
10645         .set_vf_mac = hclge_set_vf_mac,
10646 };
10647
10648 static struct hnae3_ae_algo ae_algo = {
10649         .ops = &hclge_ops,
10650         .pdev_id_table = ae_algo_pci_tbl,
10651 };
10652
10653 static int hclge_init(void)
10654 {
10655         pr_info("%s is initializing\n", HCLGE_NAME);
10656
10657         hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10658         if (!hclge_wq) {
10659                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10660                 return -ENOMEM;
10661         }
10662
10663         hnae3_register_ae_algo(&ae_algo);
10664
10665         return 0;
10666 }
10667
10668 static void hclge_exit(void)
10669 {
10670         hnae3_unregister_ae_algo(&ae_algo);
10671         destroy_workqueue(hclge_wq);
10672 }
10673 module_init(hclge_init);
10674 module_exit(hclge_exit);
10675
10676 MODULE_LICENSE("GPL");
10677 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10678 MODULE_DESCRIPTION("HCLGE Driver");
10679 MODULE_VERSION(HCLGE_MOD_VERSION);