drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/ipv6.h>
17 #include <net/rtnetlink.h>
18 #include "hclge_cmd.h"
19 #include "hclge_dcb.h"
20 #include "hclge_main.h"
21 #include "hclge_mbx.h"
22 #include "hclge_mdio.h"
23 #include "hclge_tm.h"
24 #include "hclge_err.h"
25 #include "hnae3.h"
26 #include "hclge_devlink.h"
27
28 #define HCLGE_NAME                      "hclge"
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
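/* HCLGE_STATS_READ() fetches a u64 counter at a byte offset from a stats
 * structure; HCLGE_MAC_STATS_FIELD_OFF() gives the offset of a field in
 * struct hclge_mac_stats. The g_mac_stats_string table below pairs each
 * ethtool string with such an offset.
 */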
31
32 #define HCLGE_BUF_SIZE_UNIT     256U
33 #define HCLGE_BUF_MUL_BY        2
34 #define HCLGE_BUF_DIV_BY        2
35 #define NEED_RESERVE_TC_NUM     2
36 #define BUF_MAX_PERCENT         100
37 #define BUF_RESERVE_PERCENT     90
38
39 #define HCLGE_RESET_MAX_FAIL_CNT        5
40 #define HCLGE_RESET_SYNC_TIME           100
41 #define HCLGE_PF_RESET_SYNC_TIME        20
42 #define HCLGE_PF_RESET_SYNC_CNT         1500
43
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET        1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
48 #define HCLGE_DFX_IGU_BD_OFFSET         4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
51 #define HCLGE_DFX_NCSI_BD_OFFSET        7
52 #define HCLGE_DFX_RTC_BD_OFFSET         8
53 #define HCLGE_DFX_PPP_BD_OFFSET         9
54 #define HCLGE_DFX_RCB_BD_OFFSET         10
55 #define HCLGE_DFX_TQP_BD_OFFSET         11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
57
58 #define HCLGE_LINK_STATUS_MS    10
59
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
68                                                    unsigned long *addr);
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
70
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
75
76 static struct hnae3_ae_algo ae_algo;
77
78 static struct workqueue_struct *hclge_wq;
79
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89         /* required last entry */
90         {0, }
91 };
92
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
94
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96                                          HCLGE_NIC_CSQ_BASEADDR_H_REG,
97                                          HCLGE_NIC_CSQ_DEPTH_REG,
98                                          HCLGE_NIC_CSQ_TAIL_REG,
99                                          HCLGE_NIC_CSQ_HEAD_REG,
100                                          HCLGE_NIC_CRQ_BASEADDR_L_REG,
101                                          HCLGE_NIC_CRQ_BASEADDR_H_REG,
102                                          HCLGE_NIC_CRQ_DEPTH_REG,
103                                          HCLGE_NIC_CRQ_TAIL_REG,
104                                          HCLGE_NIC_CRQ_HEAD_REG,
105                                          HCLGE_VECTOR0_CMDQ_SRC_REG,
106                                          HCLGE_CMDQ_INTR_STS_REG,
107                                          HCLGE_CMDQ_INTR_EN_REG,
108                                          HCLGE_CMDQ_INTR_GEN_REG};
109
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111                                            HCLGE_PF_OTHER_INT_REG,
112                                            HCLGE_MISC_RESET_STS_REG,
113                                            HCLGE_MISC_VECTOR_INT_STS,
114                                            HCLGE_GLOBAL_RESET_REG,
115                                            HCLGE_FUN_RST_ING,
116                                            HCLGE_GRO_EN_REG};
117
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119                                          HCLGE_RING_RX_ADDR_H_REG,
120                                          HCLGE_RING_RX_BD_NUM_REG,
121                                          HCLGE_RING_RX_BD_LENGTH_REG,
122                                          HCLGE_RING_RX_MERGE_EN_REG,
123                                          HCLGE_RING_RX_TAIL_REG,
124                                          HCLGE_RING_RX_HEAD_REG,
125                                          HCLGE_RING_RX_FBD_NUM_REG,
126                                          HCLGE_RING_RX_OFFSET_REG,
127                                          HCLGE_RING_RX_FBD_OFFSET_REG,
128                                          HCLGE_RING_RX_STASH_REG,
129                                          HCLGE_RING_RX_BD_ERR_REG,
130                                          HCLGE_RING_TX_ADDR_L_REG,
131                                          HCLGE_RING_TX_ADDR_H_REG,
132                                          HCLGE_RING_TX_BD_NUM_REG,
133                                          HCLGE_RING_TX_PRIORITY_REG,
134                                          HCLGE_RING_TX_TC_REG,
135                                          HCLGE_RING_TX_MERGE_EN_REG,
136                                          HCLGE_RING_TX_TAIL_REG,
137                                          HCLGE_RING_TX_HEAD_REG,
138                                          HCLGE_RING_TX_FBD_NUM_REG,
139                                          HCLGE_RING_TX_OFFSET_REG,
140                                          HCLGE_RING_TX_EBD_NUM_REG,
141                                          HCLGE_RING_TX_EBD_OFFSET_REG,
142                                          HCLGE_RING_TX_BD_ERR_REG,
143                                          HCLGE_RING_EN_REG};
144
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146                                              HCLGE_TQP_INTR_GL0_REG,
147                                              HCLGE_TQP_INTR_GL1_REG,
148                                              HCLGE_TQP_INTR_GL2_REG,
149                                              HCLGE_TQP_INTR_RL_REG};
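/* The register address groups above (cmdq, common, per-ring and per-vector
 * interrupt registers) list the registers read back in bulk for the ethtool
 * register dump (hclge_get_regs()).
 */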
150
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
152         "App    Loopback test",
153         "Serdes serial Loopback test",
154         "Serdes parallel Loopback test",
155         "Phy    Loopback test"
156 };
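/* Indexed by enum hnae3_loop (HNAE3_LOOP_APP, HNAE3_LOOP_SERIAL_SERDES,
 * HNAE3_LOOP_PARALLEL_SERDES, HNAE3_LOOP_PHY); see hclge_get_strings().
 */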
157
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159         {"mac_tx_mac_pause_num",
160                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161         {"mac_rx_mac_pause_num",
162                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163         {"mac_tx_control_pkt_num",
164                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
165         {"mac_rx_control_pkt_num",
166                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
167         {"mac_tx_pfc_pkt_num",
168                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
169         {"mac_tx_pfc_pri0_pkt_num",
170                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
171         {"mac_tx_pfc_pri1_pkt_num",
172                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
173         {"mac_tx_pfc_pri2_pkt_num",
174                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
175         {"mac_tx_pfc_pri3_pkt_num",
176                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
177         {"mac_tx_pfc_pri4_pkt_num",
178                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
179         {"mac_tx_pfc_pri5_pkt_num",
180                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
181         {"mac_tx_pfc_pri6_pkt_num",
182                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
183         {"mac_tx_pfc_pri7_pkt_num",
184                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
185         {"mac_rx_pfc_pkt_num",
186                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
187         {"mac_rx_pfc_pri0_pkt_num",
188                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
189         {"mac_rx_pfc_pri1_pkt_num",
190                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
191         {"mac_rx_pfc_pri2_pkt_num",
192                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
193         {"mac_rx_pfc_pri3_pkt_num",
194                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
195         {"mac_rx_pfc_pri4_pkt_num",
196                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
197         {"mac_rx_pfc_pri5_pkt_num",
198                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
199         {"mac_rx_pfc_pri6_pkt_num",
200                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
201         {"mac_rx_pfc_pri7_pkt_num",
202                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
203         {"mac_tx_total_pkt_num",
204                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
205         {"mac_tx_total_oct_num",
206                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
207         {"mac_tx_good_pkt_num",
208                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
209         {"mac_tx_bad_pkt_num",
210                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
211         {"mac_tx_good_oct_num",
212                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
213         {"mac_tx_bad_oct_num",
214                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
215         {"mac_tx_uni_pkt_num",
216                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
217         {"mac_tx_multi_pkt_num",
218                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
219         {"mac_tx_broad_pkt_num",
220                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
221         {"mac_tx_undersize_pkt_num",
222                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
223         {"mac_tx_oversize_pkt_num",
224                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
225         {"mac_tx_64_oct_pkt_num",
226                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
227         {"mac_tx_65_127_oct_pkt_num",
228                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
229         {"mac_tx_128_255_oct_pkt_num",
230                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
231         {"mac_tx_256_511_oct_pkt_num",
232                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
233         {"mac_tx_512_1023_oct_pkt_num",
234                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
235         {"mac_tx_1024_1518_oct_pkt_num",
236                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
237         {"mac_tx_1519_2047_oct_pkt_num",
238                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
239         {"mac_tx_2048_4095_oct_pkt_num",
240                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
241         {"mac_tx_4096_8191_oct_pkt_num",
242                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
243         {"mac_tx_8192_9216_oct_pkt_num",
244                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
245         {"mac_tx_9217_12287_oct_pkt_num",
246                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
247         {"mac_tx_12288_16383_oct_pkt_num",
248                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
249         {"mac_tx_1519_max_good_pkt_num",
250                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
251         {"mac_tx_1519_max_bad_pkt_num",
252                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
253         {"mac_rx_total_pkt_num",
254                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
255         {"mac_rx_total_oct_num",
256                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
257         {"mac_rx_good_pkt_num",
258                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
259         {"mac_rx_bad_pkt_num",
260                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
261         {"mac_rx_good_oct_num",
262                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
263         {"mac_rx_bad_oct_num",
264                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
265         {"mac_rx_uni_pkt_num",
266                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
267         {"mac_rx_multi_pkt_num",
268                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
269         {"mac_rx_broad_pkt_num",
270                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
271         {"mac_rx_undersize_pkt_num",
272                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
273         {"mac_rx_oversize_pkt_num",
274                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
275         {"mac_rx_64_oct_pkt_num",
276                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
277         {"mac_rx_65_127_oct_pkt_num",
278                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
279         {"mac_rx_128_255_oct_pkt_num",
280                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
281         {"mac_rx_256_511_oct_pkt_num",
282                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
283         {"mac_rx_512_1023_oct_pkt_num",
284                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
285         {"mac_rx_1024_1518_oct_pkt_num",
286                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
287         {"mac_rx_1519_2047_oct_pkt_num",
288                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
289         {"mac_rx_2048_4095_oct_pkt_num",
290                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
291         {"mac_rx_4096_8191_oct_pkt_num",
292                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
293         {"mac_rx_8192_9216_oct_pkt_num",
294                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
295         {"mac_rx_9217_12287_oct_pkt_num",
296                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
297         {"mac_rx_12288_16383_oct_pkt_num",
298                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
299         {"mac_rx_1519_max_good_pkt_num",
300                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
301         {"mac_rx_1519_max_bad_pkt_num",
302                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
303
304         {"mac_tx_fragment_pkt_num",
305                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
306         {"mac_tx_undermin_pkt_num",
307                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
308         {"mac_tx_jabber_pkt_num",
309                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
310         {"mac_tx_err_all_pkt_num",
311                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
312         {"mac_tx_from_app_good_pkt_num",
313                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
314         {"mac_tx_from_app_bad_pkt_num",
315                 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
316         {"mac_rx_fragment_pkt_num",
317                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
318         {"mac_rx_undermin_pkt_num",
319                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
320         {"mac_rx_jabber_pkt_num",
321                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
322         {"mac_rx_fcs_err_pkt_num",
323                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
324         {"mac_rx_send_app_good_pkt_num",
325                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
326         {"mac_rx_send_app_bad_pkt_num",
327                 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
328 };
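/* Each entry above pairs an ethtool statistic name with the offset of the
 * matching u64 counter in struct hclge_mac_stats; hclge_comm_get_strings()
 * and hclge_comm_get_stats() walk this table when reporting MAC stats.
 */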
329
330 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
331         {
332                 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
333                 .ethter_type = cpu_to_le16(ETH_P_LLDP),
334                 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
335                 .i_port_bitmap = 0x1,
336         },
337 };
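/* MAC manager table: the single entry matches the LLDP multicast address
 * (01:80:c2:00:00:0e, ethertype ETH_P_LLDP) and is programmed into the
 * hardware MAC manager table during initialization.
 */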
338
339 static const u8 hclge_hash_key[] = {
340         0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
341         0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
342         0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
343         0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
344         0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
345 };
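/* Default RSS hash key loaded into the hardware when RSS is initialised;
 * this is the well-known 40-byte key also used as the Toeplitz default by
 * several other NIC drivers.
 */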
346
347 static const u32 hclge_dfx_bd_offset_list[] = {
348         HCLGE_DFX_BIOS_BD_OFFSET,
349         HCLGE_DFX_SSU_0_BD_OFFSET,
350         HCLGE_DFX_SSU_1_BD_OFFSET,
351         HCLGE_DFX_IGU_BD_OFFSET,
352         HCLGE_DFX_RPU_0_BD_OFFSET,
353         HCLGE_DFX_RPU_1_BD_OFFSET,
354         HCLGE_DFX_NCSI_BD_OFFSET,
355         HCLGE_DFX_RTC_BD_OFFSET,
356         HCLGE_DFX_PPP_BD_OFFSET,
357         HCLGE_DFX_RCB_BD_OFFSET,
358         HCLGE_DFX_TQP_BD_OFFSET,
359         HCLGE_DFX_SSU_2_BD_OFFSET
360 };
361
362 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
363         HCLGE_OPC_DFX_BIOS_COMMON_REG,
364         HCLGE_OPC_DFX_SSU_REG_0,
365         HCLGE_OPC_DFX_SSU_REG_1,
366         HCLGE_OPC_DFX_IGU_EGU_REG,
367         HCLGE_OPC_DFX_RPU_REG_0,
368         HCLGE_OPC_DFX_RPU_REG_1,
369         HCLGE_OPC_DFX_NCSI_REG,
370         HCLGE_OPC_DFX_RTC_REG,
371         HCLGE_OPC_DFX_PPP_REG,
372         HCLGE_OPC_DFX_RCB_REG,
373         HCLGE_OPC_DFX_TQP_REG,
374         HCLGE_OPC_DFX_SSU_REG_2
375 };
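/* hclge_dfx_bd_offset_list[] and hclge_dfx_reg_opcode_list[] are parallel
 * arrays: entry i gives the BD-number offset and the dump opcode for the
 * same DFX register module (BIOS, SSU, IGU, ...), used by the DFX register
 * dump path.
 */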
376
377 static const struct key_info meta_data_key_info[] = {
378         { PACKET_TYPE_ID, 6 },
379         { IP_FRAGEMENT, 1 },
380         { ROCE_TYPE, 1 },
381         { NEXT_KEY, 5 },
382         { VLAN_NUMBER, 2 },
383         { SRC_VPORT, 12 },
384         { DST_VPORT, 12 },
385         { TUNNEL_PACKET, 1 },
386 };
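/* Each entry gives a meta-data field and its width in bits within the flow
 * director key.
 */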
387
388 static const struct key_info tuple_key_info[] = {
389         { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
390         { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
391         { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
392         { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
393         { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
394         { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
395         { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
396         { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
397         { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
398         { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
399         { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
400         { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
401         { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
402         { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
403         { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
404         { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
405         { INNER_DST_MAC, 48, KEY_OPT_MAC,
406           offsetof(struct hclge_fd_rule, tuples.dst_mac),
407           offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
408         { INNER_SRC_MAC, 48, KEY_OPT_MAC,
409           offsetof(struct hclge_fd_rule, tuples.src_mac),
410           offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
411         { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
412           offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
413           offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
414         { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
415         { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
416           offsetof(struct hclge_fd_rule, tuples.ether_proto),
417           offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
418         { INNER_L2_RSV, 16, KEY_OPT_LE16,
419           offsetof(struct hclge_fd_rule, tuples.l2_user_def),
420           offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
421         { INNER_IP_TOS, 8, KEY_OPT_U8,
422           offsetof(struct hclge_fd_rule, tuples.ip_tos),
423           offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
424         { INNER_IP_PROTO, 8, KEY_OPT_U8,
425           offsetof(struct hclge_fd_rule, tuples.ip_proto),
426           offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
427         { INNER_SRC_IP, 32, KEY_OPT_IP,
428           offsetof(struct hclge_fd_rule, tuples.src_ip),
429           offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
430         { INNER_DST_IP, 32, KEY_OPT_IP,
431           offsetof(struct hclge_fd_rule, tuples.dst_ip),
432           offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
433         { INNER_L3_RSV, 16, KEY_OPT_LE16,
434           offsetof(struct hclge_fd_rule, tuples.l3_user_def),
435           offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
436         { INNER_SRC_PORT, 16, KEY_OPT_LE16,
437           offsetof(struct hclge_fd_rule, tuples.src_port),
438           offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
439         { INNER_DST_PORT, 16, KEY_OPT_LE16,
440           offsetof(struct hclge_fd_rule, tuples.dst_port),
441           offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
442         { INNER_L4_RSV, 32, KEY_OPT_LE32,
443           offsetof(struct hclge_fd_rule, tuples.l4_user_def),
444           offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
445 };
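/* For each flow director tuple: the tuple id, its width in bits, how the
 * key bytes are encoded (KEY_OPT_*), and the offsets of the tuple value and
 * mask inside struct hclge_fd_rule (-1 where the driver does not extract
 * that tuple from user rules, e.g. the outer tuples).
 */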
446
447 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
448 {
449 #define HCLGE_MAC_CMD_NUM 21
450
451         u64 *data = (u64 *)(&hdev->mac_stats);
452         struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
453         __le64 *desc_data;
454         int i, k, n;
455         int ret;
456
457         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
458         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
459         if (ret) {
460                 dev_err(&hdev->pdev->dev,
461                         "Get MAC pkt stats fail, status = %d.\n", ret);
462
463                 return ret;
464         }
465
466         for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
467                 /* for special opcode 0032, only the first desc has the head */
468                 if (unlikely(i == 0)) {
469                         desc_data = (__le64 *)(&desc[i].data[0]);
470                         n = HCLGE_RD_FIRST_STATS_NUM;
471                 } else {
472                         desc_data = (__le64 *)(&desc[i]);
473                         n = HCLGE_RD_OTHER_STATS_NUM;
474                 }
475
476                 for (k = 0; k < n; k++) {
477                         *data += le64_to_cpu(*desc_data);
478                         data++;
479                         desc_data++;
480                 }
481         }
482
483         return 0;
484 }
485
486 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
487 {
488         u64 *data = (u64 *)(&hdev->mac_stats);
489         struct hclge_desc *desc;
490         __le64 *desc_data;
491         u16 i, k, n;
492         int ret;
493
494         /* This may be called inside atomic sections,
495          * so GFP_ATOMIC is more suitable here
496          */
497         desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
498         if (!desc)
499                 return -ENOMEM;
500
501         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
502         ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
503         if (ret) {
504                 kfree(desc);
505                 return ret;
506         }
507
508         for (i = 0; i < desc_num; i++) {
509                 /* for special opcode 0034, only the first desc has the head */
510                 if (i == 0) {
511                         desc_data = (__le64 *)(&desc[i].data[0]);
512                         n = HCLGE_RD_FIRST_STATS_NUM;
513                 } else {
514                         desc_data = (__le64 *)(&desc[i]);
515                         n = HCLGE_RD_OTHER_STATS_NUM;
516                 }
517
518                 for (k = 0; k < n; k++) {
519                         *data += le64_to_cpu(*desc_data);
520                         data++;
521                         desc_data++;
522                 }
523         }
524
525         kfree(desc);
526
527         return 0;
528 }
529
530 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
531 {
532         struct hclge_desc desc;
533         __le32 *desc_data;
534         u32 reg_num;
535         int ret;
536
537         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
539         if (ret)
540                 return ret;
541
542         desc_data = (__le32 *)(&desc.data[0]);
543         reg_num = le32_to_cpu(*desc_data);
544
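        /* i.e. desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4): one descriptor,
         * plus an extra descriptor for every four additional 64-bit values
         * beyond the first few returned with the command head.
         */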
545         *desc_num = 1 + ((reg_num - 3) >> 2) +
546                     (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
547
548         return 0;
549 }
550
551 static int hclge_mac_update_stats(struct hclge_dev *hdev)
552 {
553         u32 desc_num;
554         int ret;
555
556         ret = hclge_mac_query_reg_num(hdev, &desc_num);
557         /* The firmware supports the new statistics acquisition method */
558         if (!ret)
559                 ret = hclge_mac_update_stats_complete(hdev, desc_num);
560         else if (ret == -EOPNOTSUPP)
561                 ret = hclge_mac_update_stats_defective(hdev);
562         else
563                 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
564
565         return ret;
566 }
567
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
569 {
570         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571         struct hclge_vport *vport = hclge_get_vport(handle);
572         struct hclge_dev *hdev = vport->back;
573         struct hnae3_queue *queue;
574         struct hclge_desc desc[1];
575         struct hclge_tqp *tqp;
576         int ret, i;
577
578         for (i = 0; i < kinfo->num_tqps; i++) {
579                 queue = handle->kinfo.tqp[i];
580                 tqp = container_of(queue, struct hclge_tqp, q);
581                 /* command : HCLGE_OPC_QUERY_RX_STATS */
582                 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
583                                            true);
584
585                 desc[0].data[0] = cpu_to_le32(tqp->index);
586                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
587                 if (ret) {
588                         dev_err(&hdev->pdev->dev,
589                                 "Query tqp stat fail, status = %d,queue = %d\n",
590                                 ret, i);
591                         return ret;
592                 }
593                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594                         le32_to_cpu(desc[0].data[1]);
595         }
596
597         for (i = 0; i < kinfo->num_tqps; i++) {
598                 queue = handle->kinfo.tqp[i];
599                 tqp = container_of(queue, struct hclge_tqp, q);
600                 /* command : HCLGE_OPC_QUERY_TX_STATS */
601                 hclge_cmd_setup_basic_desc(&desc[0],
602                                            HCLGE_OPC_QUERY_TX_STATS,
603                                            true);
604
605                 desc[0].data[0] = cpu_to_le32(tqp->index);
606                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
607                 if (ret) {
608                         dev_err(&hdev->pdev->dev,
609                                 "Query tqp stat fail, status = %d,queue = %d\n",
610                                 ret, i);
611                         return ret;
612                 }
613                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614                         le32_to_cpu(desc[0].data[1]);
615         }
616
617         return 0;
618 }
619
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
621 {
622         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623         struct hclge_tqp *tqp;
624         u64 *buff = data;
625         int i;
626
627         for (i = 0; i < kinfo->num_tqps; i++) {
628                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
630         }
631
632         for (i = 0; i < kinfo->num_tqps; i++) {
633                 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
635         }
636
637         return buff;
638 }
639
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
641 {
642         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
643
644         /* each tqp has a TX queue and an RX queue */
645         return kinfo->num_tqps * (2);
646 }
647
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
649 {
650         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
651         u8 *buff = data;
652         int i;
653
654         for (i = 0; i < kinfo->num_tqps; i++) {
655                 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656                         struct hclge_tqp, q);
657                 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
658                          tqp->index);
659                 buff = buff + ETH_GSTRING_LEN;
660         }
661
662         for (i = 0; i < kinfo->num_tqps; i++) {
663                 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664                         struct hclge_tqp, q);
665                 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
666                          tqp->index);
667                 buff = buff + ETH_GSTRING_LEN;
668         }
669
670         return buff;
671 }
672
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674                                  const struct hclge_comm_stats_str strs[],
675                                  int size, u64 *data)
676 {
677         u64 *buf = data;
678         u32 i;
679
680         for (i = 0; i < size; i++)
681                 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
682
683         return buf + size;
684 }
685
686 static u8 *hclge_comm_get_strings(u32 stringset,
687                                   const struct hclge_comm_stats_str strs[],
688                                   int size, u8 *data)
689 {
690         char *buff = (char *)data;
691         u32 i;
692
693         if (stringset != ETH_SS_STATS)
694                 return buff;
695
696         for (i = 0; i < size; i++) {
697                 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698                 buff = buff + ETH_GSTRING_LEN;
699         }
700
701         return (u8 *)buff;
702 }
703
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
705 {
706         struct hnae3_handle *handle;
707         int status;
708
709         handle = &hdev->vport[0].nic;
710         if (handle->client) {
711                 status = hclge_tqps_update_stats(handle);
712                 if (status) {
713                         dev_err(&hdev->pdev->dev,
714                                 "Update TQPS stats fail, status = %d.\n",
715                                 status);
716                 }
717         }
718
719         status = hclge_mac_update_stats(hdev);
720         if (status)
721                 dev_err(&hdev->pdev->dev,
722                         "Update MAC stats fail, status = %d.\n", status);
723 }
724
725 static void hclge_update_stats(struct hnae3_handle *handle,
726                                struct net_device_stats *net_stats)
727 {
728         struct hclge_vport *vport = hclge_get_vport(handle);
729         struct hclge_dev *hdev = vport->back;
730         int status;
731
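        /* HCLGE_STATE_STATISTICS_UPDATING acts as a simple non-blocking lock:
         * if another context is already refreshing the counters, just return.
         */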
732         if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
733                 return;
734
735         status = hclge_mac_update_stats(hdev);
736         if (status)
737                 dev_err(&hdev->pdev->dev,
738                         "Update MAC stats fail, status = %d.\n",
739                         status);
740
741         status = hclge_tqps_update_stats(handle);
742         if (status)
743                 dev_err(&hdev->pdev->dev,
744                         "Update TQPS stats fail, status = %d.\n",
745                         status);
746
747         clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
748 }
749
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
751 {
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
753                 HNAE3_SUPPORT_PHY_LOOPBACK | \
754                 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
755                 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
756
757         struct hclge_vport *vport = hclge_get_vport(handle);
758         struct hclge_dev *hdev = vport->back;
759         int count = 0;
760
761         /* Loopback test support rules:
762          * mac: only supported in GE mode
763          * serdes: supported by all mac modes, including GE/XGE/LGE/CGE
764          * phy: only supported when a phy device exists on the board
765          */
766         if (stringset == ETH_SS_TEST) {
767                 /* clear loopback bit flags at first */
768                 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
769                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772                     hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
773                         count += 1;
774                         handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
775                 }
776
777                 count += 2;
778                 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779                 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
780
781                 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782                      hdev->hw.mac.phydev->drv->set_loopback) ||
783                     hnae3_dev_phy_imp_supported(hdev)) {
784                         count += 1;
785                         handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
786                 }
787         } else if (stringset == ETH_SS_STATS) {
788                 count = ARRAY_SIZE(g_mac_stats_string) +
789                         hclge_tqps_get_sset_count(handle, stringset);
790         }
791
792         return count;
793 }
794
795 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
796                               u8 *data)
797 {
798         u8 *p = (char *)data;
799         int size;
800
801         if (stringset == ETH_SS_STATS) {
802                 size = ARRAY_SIZE(g_mac_stats_string);
803                 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
804                                            size, p);
805                 p = hclge_tqps_get_strings(handle, p);
806         } else if (stringset == ETH_SS_TEST) {
807                 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
808                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
809                                ETH_GSTRING_LEN);
810                         p += ETH_GSTRING_LEN;
811                 }
812                 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
813                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
814                                ETH_GSTRING_LEN);
815                         p += ETH_GSTRING_LEN;
816                 }
817                 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
818                         memcpy(p,
819                                hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
820                                ETH_GSTRING_LEN);
821                         p += ETH_GSTRING_LEN;
822                 }
823                 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
824                         memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
825                                ETH_GSTRING_LEN);
826                         p += ETH_GSTRING_LEN;
827                 }
828         }
829 }
830
831 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
832 {
833         struct hclge_vport *vport = hclge_get_vport(handle);
834         struct hclge_dev *hdev = vport->back;
835         u64 *p;
836
837         p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
838                                  ARRAY_SIZE(g_mac_stats_string), data);
839         p = hclge_tqps_get_stats(handle, p);
840 }
841
842 static void hclge_get_mac_stat(struct hnae3_handle *handle,
843                                struct hns3_mac_stats *mac_stats)
844 {
845         struct hclge_vport *vport = hclge_get_vport(handle);
846         struct hclge_dev *hdev = vport->back;
847
848         hclge_update_stats(handle, NULL);
849
850         mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
851         mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
852 }
853
854 static int hclge_parse_func_status(struct hclge_dev *hdev,
855                                    struct hclge_func_status_cmd *status)
856 {
857 #define HCLGE_MAC_ID_MASK       0xF
858
859         if (!(status->pf_state & HCLGE_PF_STATE_DONE))
860                 return -EINVAL;
861
862         /* Record whether this pf is the main pf */
863         if (status->pf_state & HCLGE_PF_STATE_MAIN)
864                 hdev->flag |= HCLGE_FLAG_MAIN;
865         else
866                 hdev->flag &= ~HCLGE_FLAG_MAIN;
867
868         hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
869         return 0;
870 }
871
872 static int hclge_query_function_status(struct hclge_dev *hdev)
873 {
874 #define HCLGE_QUERY_MAX_CNT     5
875
876         struct hclge_func_status_cmd *req;
877         struct hclge_desc desc;
878         int timeout = 0;
879         int ret;
880
881         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
882         req = (struct hclge_func_status_cmd *)desc.data;
883
884         do {
885                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
886                 if (ret) {
887                         dev_err(&hdev->pdev->dev,
888                                 "query function status failed %d.\n", ret);
889                         return ret;
890                 }
891
892                 /* Check whether PF reset is done */
893                 if (req->pf_state)
894                         break;
895                 usleep_range(1000, 2000);
896         } while (timeout++ < HCLGE_QUERY_MAX_CNT);
897
898         return hclge_parse_func_status(hdev, req);
899 }
900
901 static int hclge_query_pf_resource(struct hclge_dev *hdev)
902 {
903         struct hclge_pf_res_cmd *req;
904         struct hclge_desc desc;
905         int ret;
906
907         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
908         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
909         if (ret) {
910                 dev_err(&hdev->pdev->dev,
911                         "query pf resource failed %d.\n", ret);
912                 return ret;
913         }
914
915         req = (struct hclge_pf_res_cmd *)desc.data;
916         hdev->num_tqps = le16_to_cpu(req->tqp_num) +
917                          le16_to_cpu(req->ext_tqp_num);
918         hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
919
920         if (req->tx_buf_size)
921                 hdev->tx_buf_size =
922                         le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
923         else
924                 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
925
926         hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
927
928         if (req->dv_buf_size)
929                 hdev->dv_buf_size =
930                         le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
931         else
932                 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
933
934         hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
935
936         hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
937         if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
938                 dev_err(&hdev->pdev->dev,
939                         "only %u msi resources available, not enough for pf(min:2).\n",
940                         hdev->num_nic_msi);
941                 return -EINVAL;
942         }
943
944         if (hnae3_dev_roce_supported(hdev)) {
945                 hdev->num_roce_msi =
946                         le16_to_cpu(req->pf_intr_vector_number_roce);
947
948                 /* PF should have both NIC vectors and RoCE vectors;
949                  * NIC vectors are queued before RoCE vectors.
950                  */
951                 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
952         } else {
953                 hdev->num_msi = hdev->num_nic_msi;
954         }
955
956         return 0;
957 }
958
959 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
960 {
961         switch (speed_cmd) {
962         case HCLGE_FW_MAC_SPEED_10M:
963                 *speed = HCLGE_MAC_SPEED_10M;
964                 break;
965         case HCLGE_FW_MAC_SPEED_100M:
966                 *speed = HCLGE_MAC_SPEED_100M;
967                 break;
968         case HCLGE_FW_MAC_SPEED_1G:
969                 *speed = HCLGE_MAC_SPEED_1G;
970                 break;
971         case HCLGE_FW_MAC_SPEED_10G:
972                 *speed = HCLGE_MAC_SPEED_10G;
973                 break;
974         case HCLGE_FW_MAC_SPEED_25G:
975                 *speed = HCLGE_MAC_SPEED_25G;
976                 break;
977         case HCLGE_FW_MAC_SPEED_40G:
978                 *speed = HCLGE_MAC_SPEED_40G;
979                 break;
980         case HCLGE_FW_MAC_SPEED_50G:
981                 *speed = HCLGE_MAC_SPEED_50G;
982                 break;
983         case HCLGE_FW_MAC_SPEED_100G:
984                 *speed = HCLGE_MAC_SPEED_100G;
985                 break;
986         case HCLGE_FW_MAC_SPEED_200G:
987                 *speed = HCLGE_MAC_SPEED_200G;
988                 break;
989         default:
990                 return -EINVAL;
991         }
992
993         return 0;
994 }
995
996 static const struct hclge_speed_bit_map speed_bit_map[] = {
997         {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
998         {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
999         {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1000         {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1001         {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1002         {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1003         {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1004         {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1005         {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
1006 };
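/* Maps an ethtool speed value to the corresponding HCLGE_SUPPORT_*_BIT in
 * the MAC's speed_ability word; used by hclge_get_speed_bit() and
 * hclge_check_port_speed() below.
 */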
1007
1008 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1009 {
1010         u16 i;
1011
1012         for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1013                 if (speed == speed_bit_map[i].speed) {
1014                         *speed_bit = speed_bit_map[i].speed_bit;
1015                         return 0;
1016                 }
1017         }
1018
1019         return -EINVAL;
1020 }
1021
1022 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1023 {
1024         struct hclge_vport *vport = hclge_get_vport(handle);
1025         struct hclge_dev *hdev = vport->back;
1026         u32 speed_ability = hdev->hw.mac.speed_ability;
1027         u32 speed_bit = 0;
1028         int ret;
1029
1030         ret = hclge_get_speed_bit(speed, &speed_bit);
1031         if (ret)
1032                 return ret;
1033
1034         if (speed_bit & speed_ability)
1035                 return 0;
1036
1037         return -EINVAL;
1038 }
1039
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1041 {
1042         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1044                                  mac->supported);
1045         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1047                                  mac->supported);
1048         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1050                                  mac->supported);
1051         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1053                                  mac->supported);
1054         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1056                                  mac->supported);
1057         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1059                                  mac->supported);
1060 }
1061
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1063 {
1064         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1066                                  mac->supported);
1067         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1069                                  mac->supported);
1070         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1072                                  mac->supported);
1073         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1075                                  mac->supported);
1076         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1078                                  mac->supported);
1079         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1080                 linkmode_set_bit(
1081                         ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1082                         mac->supported);
1083 }
1084
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1086 {
1087         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1089                                  mac->supported);
1090         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1092                                  mac->supported);
1093         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1095                                  mac->supported);
1096         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1098                                  mac->supported);
1099         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1101                                  mac->supported);
1102         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1104                                  mac->supported);
1105 }
1106
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1108 {
1109         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1111                                  mac->supported);
1112         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113                 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1114                                  mac->supported);
1115         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116                 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1117                                  mac->supported);
1118         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119                 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1120                                  mac->supported);
1121         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122                 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1123                                  mac->supported);
1124         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125                 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1126                                  mac->supported);
1127         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128                 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1129                                  mac->supported);
1130 }
1131
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1133 {
1134         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135         linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1136
1137         switch (mac->speed) {
1138         case HCLGE_MAC_SPEED_10G:
1139         case HCLGE_MAC_SPEED_40G:
1140                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1141                                  mac->supported);
1142                 mac->fec_ability =
1143                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1144                 break;
1145         case HCLGE_MAC_SPEED_25G:
1146         case HCLGE_MAC_SPEED_50G:
1147                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1148                                  mac->supported);
1149                 mac->fec_ability =
1150                         BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151                         BIT(HNAE3_FEC_AUTO);
1152                 break;
1153         case HCLGE_MAC_SPEED_100G:
1154         case HCLGE_MAC_SPEED_200G:
1155                 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156                 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1157                 break;
1158         default:
1159                 mac->fec_ability = 0;
1160                 break;
1161         }
1162 }
1163
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1165                                         u16 speed_ability)
1166 {
1167         struct hclge_mac *mac = &hdev->hw.mac;
1168
1169         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1171                                  mac->supported);
1172
1173         hclge_convert_setting_sr(mac, speed_ability);
1174         hclge_convert_setting_lr(mac, speed_ability);
1175         hclge_convert_setting_cr(mac, speed_ability);
1176         if (hnae3_dev_fec_supported(hdev))
1177                 hclge_convert_setting_fec(mac);
1178
1179         if (hnae3_dev_pause_supported(hdev))
1180                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1181
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1184 }
1185
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1187                                             u16 speed_ability)
1188 {
1189         struct hclge_mac *mac = &hdev->hw.mac;
1190
1191         hclge_convert_setting_kr(mac, speed_ability);
1192         if (hnae3_dev_fec_supported(hdev))
1193                 hclge_convert_setting_fec(mac);
1194
1195         if (hnae3_dev_pause_supported(hdev))
1196                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1197
1198         linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1200 }
1201
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1203                                          u16 speed_ability)
1204 {
1205         unsigned long *supported = hdev->hw.mac.supported;
1206
1207         /* default to supporting all speeds for GE ports */
1208         if (!speed_ability)
1209                 speed_ability = HCLGE_SUPPORT_GE;
1210
1211         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212                 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1213                                  supported);
1214
1215         if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1217                                  supported);
1218                 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1219                                  supported);
1220         }
1221
1222         if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224                 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1225         }
1226
1227         if (hnae3_dev_pause_supported(hdev)) {
1228                 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229                 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1230         }
1231
1232         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1234 }
1235
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1237 {
1238         u8 media_type = hdev->hw.mac.media_type;
1239
1240         if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241                 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242         else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243                 hclge_parse_copper_link_mode(hdev, speed_ability);
1244         else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245                 hclge_parse_backplane_link_mode(hdev, speed_ability);
1246 }
1247
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1249 {
1250         if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251                 return HCLGE_MAC_SPEED_200G;
1252
1253         if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254                 return HCLGE_MAC_SPEED_100G;
1255
1256         if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257                 return HCLGE_MAC_SPEED_50G;
1258
1259         if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260                 return HCLGE_MAC_SPEED_40G;
1261
1262         if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263                 return HCLGE_MAC_SPEED_25G;
1264
1265         if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266                 return HCLGE_MAC_SPEED_10G;
1267
1268         if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269                 return HCLGE_MAC_SPEED_1G;
1270
1271         if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272                 return HCLGE_MAC_SPEED_100M;
1273
1274         if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275                 return HCLGE_MAC_SPEED_10M;
1276
1277         return HCLGE_MAC_SPEED_1G;
1278 }
1279
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1281 {
1282 #define HCLGE_TX_SPARE_SIZE_UNIT                4096
1283 #define SPEED_ABILITY_EXT_SHIFT                 8
1284
1285         struct hclge_cfg_param_cmd *req;
1286         u64 mac_addr_tmp_high;
1287         u16 speed_ability_ext;
1288         u64 mac_addr_tmp;
1289         unsigned int i;
1290
1291         req = (struct hclge_cfg_param_cmd *)desc[0].data;
1292
1293         /* get the configuration */
1294         cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295                                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296         cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297                                             HCLGE_CFG_TQP_DESC_N_M,
1298                                             HCLGE_CFG_TQP_DESC_N_S);
1299
1300         cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301                                         HCLGE_CFG_PHY_ADDR_M,
1302                                         HCLGE_CFG_PHY_ADDR_S);
1303         cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304                                           HCLGE_CFG_MEDIA_TP_M,
1305                                           HCLGE_CFG_MEDIA_TP_S);
1306         cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307                                           HCLGE_CFG_RX_BUF_LEN_M,
1308                                           HCLGE_CFG_RX_BUF_LEN_S);
1309         /* get mac_address */
1310         mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311         mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312                                             HCLGE_CFG_MAC_ADDR_H_M,
1313                                             HCLGE_CFG_MAC_ADDR_H_S);
1314
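        /* merge the upper 16 MAC bits into bits 32..47 of mac_addr_tmp;
         * the two shifts together are equivalent to a single shift by 32
         */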
1315         mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1316
1317         cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318                                              HCLGE_CFG_DEFAULT_SPEED_M,
1319                                              HCLGE_CFG_DEFAULT_SPEED_S);
1320         cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321                                                HCLGE_CFG_RSS_SIZE_M,
1322                                                HCLGE_CFG_RSS_SIZE_S);
1323
1324         for (i = 0; i < ETH_ALEN; i++)
1325                 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1326
1327         req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328         cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1329
1330         cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331                                              HCLGE_CFG_SPEED_ABILITY_M,
1332                                              HCLGE_CFG_SPEED_ABILITY_S);
1333         speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334                                             HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335                                             HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336         cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1337
1338         cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339                                                HCLGE_CFG_VLAN_FLTR_CAP_M,
1340                                                HCLGE_CFG_VLAN_FLTR_CAP_S);
1341
1342         cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343                                          HCLGE_CFG_UMV_TBL_SPACE_M,
1344                                          HCLGE_CFG_UMV_TBL_SPACE_S);
1345         if (!cfg->umv_space)
1346                 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1347
1348         cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349                                                HCLGE_CFG_PF_RSS_SIZE_M,
1350                                                HCLGE_CFG_PF_RSS_SIZE_S);
1351
1352         /* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power
1353          * of 2 exponent instead of the value itself, which is more
1354          * flexible for future changes and expansions.
1355          * A PF field of 0 is not meaningful; in that case the PF uses
1356          * the same max rss size as the VF, read from the
1357          * HCLGE_CFG_RSS_SIZE_S field.
1358          */
1359         cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360                                1U << cfg->pf_rss_size_max :
1361                                cfg->vf_rss_size_max;
1362
1363         /* The unit of the tx spare buffer size queried from configuration
1364          * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1365          * needed here.
1366          */
1367         cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369                                                  HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370         cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1371 }
1372
1373 /* hclge_get_cfg: query the static parameters from flash
1374  * @hdev: pointer to struct hclge_dev
1375  * @hcfg: the config structure to be filled in
1376  */
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1378 {
1379         struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380         struct hclge_cfg_param_cmd *req;
1381         unsigned int i;
1382         int ret;
1383
1384         for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1385                 u32 offset = 0;
1386
1387                 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1389                                            true);
1390                 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391                                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1392                 /* Len is in units of 4 bytes when sent to hardware */
1393                 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394                                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395                 req->offset = cpu_to_le32(offset);
1396         }
1397
1398         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1399         if (ret) {
1400                 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1401                 return ret;
1402         }
1403
1404         hclge_parse_cfg(hcfg, desc);
1405
1406         return 0;
1407 }
1408
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1410 {
1411 #define HCLGE_MAX_NON_TSO_BD_NUM                        8U
1412
1413         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1414
1415         ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416         ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417         ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418         ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419         ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420         ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421         ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1422 }
1423
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425                                   struct hclge_desc *desc)
1426 {
1427         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428         struct hclge_dev_specs_0_cmd *req0;
1429         struct hclge_dev_specs_1_cmd *req1;
1430
1431         req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432         req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1433
1434         ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435         ae_dev->dev_specs.rss_ind_tbl_size =
1436                 le16_to_cpu(req0->rss_ind_tbl_size);
1437         ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438         ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439         ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440         ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441         ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442         ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1443 }
1444
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1446 {
1447         struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1448
1449         if (!dev_specs->max_non_tso_bd_num)
1450                 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451         if (!dev_specs->rss_ind_tbl_size)
1452                 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453         if (!dev_specs->rss_key_size)
1454                 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455         if (!dev_specs->max_tm_rate)
1456                 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457         if (!dev_specs->max_qset_num)
1458                 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459         if (!dev_specs->max_int_gl)
1460                 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461         if (!dev_specs->max_frm_size)
1462                 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1463 }
1464
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1466 {
1467         struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1468         int ret;
1469         int i;
1470
1471         /* set default specifications as devices lower than version V3 do not
1472          * support querying specifications from firmware.
1473          */
1474         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475                 hclge_set_default_dev_specs(hdev);
1476                 return 0;
1477         }
1478
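        /* chain all but the last descriptor together using the NEXT flag */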
1479         for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1481                                            true);
1482                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1483         }
1484         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1485
1486         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1487         if (ret)
1488                 return ret;
1489
1490         hclge_parse_dev_specs(hdev, desc);
1491         hclge_check_dev_specs(hdev);
1492
1493         return 0;
1494 }
1495
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1497 {
1498         int ret;
1499
1500         ret = hclge_query_function_status(hdev);
1501         if (ret) {
1502                 dev_err(&hdev->pdev->dev,
1503                         "query function status error %d.\n", ret);
1504                 return ret;
1505         }
1506
1507         /* get pf resource */
1508         return hclge_query_pf_resource(hdev);
1509 }
1510
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1512 {
1513 #define HCLGE_MIN_TX_DESC       64
1514 #define HCLGE_MIN_RX_DESC       64
1515
1516         if (!is_kdump_kernel())
1517                 return;
1518
1519         dev_info(&hdev->pdev->dev,
1520                  "Running kdump kernel. Using minimal resources\n");
1521
1522         /* the minimal number of queue pairs equals the number of vports */
1523         hdev->num_tqps = hdev->num_req_vfs + 1;
1524         hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525         hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1526 }
1527
1528 static int hclge_configure(struct hclge_dev *hdev)
1529 {
1530         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531         struct hclge_cfg cfg;
1532         unsigned int i;
1533         int ret;
1534
1535         ret = hclge_get_cfg(hdev, &cfg);
1536         if (ret)
1537                 return ret;
1538
1539         hdev->base_tqp_pid = 0;
1540         hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1541         hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1542         hdev->rx_buf_len = cfg.rx_buf_len;
1543         ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1544         hdev->hw.mac.media_type = cfg.media_type;
1545         hdev->hw.mac.phy_addr = cfg.phy_addr;
1546         hdev->num_tx_desc = cfg.tqp_desc_num;
1547         hdev->num_rx_desc = cfg.tqp_desc_num;
1548         hdev->tm_info.num_pg = 1;
1549         hdev->tc_max = cfg.tc_num;
1550         hdev->tm_info.hw_pfc_map = 0;
1551         hdev->wanted_umv_size = cfg.umv_space;
1552         hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1553         hdev->gro_en = true;
1554         if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1555                 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1556
1557         if (hnae3_dev_fd_supported(hdev)) {
1558                 hdev->fd_en = true;
1559                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1560         }
1561
1562         ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1563         if (ret) {
1564                 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1565                         cfg.default_speed, ret);
1566                 return ret;
1567         }
1568
1569         hclge_parse_link_mode(hdev, cfg.speed_ability);
1570
1571         hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1572
1573         if ((hdev->tc_max > HNAE3_MAX_TC) ||
1574             (hdev->tc_max < 1)) {
1575                 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1576                          hdev->tc_max);
1577                 hdev->tc_max = 1;
1578         }
1579
1580         /* Dev does not support DCB */
1581         if (!hnae3_dev_dcb_supported(hdev)) {
1582                 hdev->tc_max = 1;
1583                 hdev->pfc_max = 0;
1584         } else {
1585                 hdev->pfc_max = hdev->tc_max;
1586         }
1587
1588         hdev->tm_info.num_tc = 1;
1589
1590         /* Non-contiguous TCs are currently not supported */
1591         for (i = 0; i < hdev->tm_info.num_tc; i++)
1592                 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1593
1594         hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1595
1596         hclge_init_kdump_kernel_config(hdev);
1597
1598         /* Set the init affinity based on pci func number */
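        /* i.e. spread the PCI functions over the CPUs that are local to the
         * device's NUMA node
         */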
1599         i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1600         i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1601         cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1602                         &hdev->affinity_mask);
1603
1604         return ret;
1605 }
1606
1607 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1608                             u16 tso_mss_max)
1609 {
1610         struct hclge_cfg_tso_status_cmd *req;
1611         struct hclge_desc desc;
1612
1613         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1614
1615         req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1616         req->tso_mss_min = cpu_to_le16(tso_mss_min);
1617         req->tso_mss_max = cpu_to_le16(tso_mss_max);
1618
1619         return hclge_cmd_send(&hdev->hw, &desc, 1);
1620 }
1621
1622 static int hclge_config_gro(struct hclge_dev *hdev)
1623 {
1624         struct hclge_cfg_gro_status_cmd *req;
1625         struct hclge_desc desc;
1626         int ret;
1627
1628         if (!hnae3_dev_gro_supported(hdev))
1629                 return 0;
1630
1631         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1632         req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1633
1634         req->gro_en = hdev->gro_en ? 1 : 0;
1635
1636         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1637         if (ret)
1638                 dev_err(&hdev->pdev->dev,
1639                         "GRO hardware config cmd failed, ret = %d\n", ret);
1640
1641         return ret;
1642 }
1643
1644 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1645 {
1646         struct hclge_tqp *tqp;
1647         int i;
1648
1649         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1650                                   sizeof(struct hclge_tqp), GFP_KERNEL);
1651         if (!hdev->htqp)
1652                 return -ENOMEM;
1653
1654         tqp = hdev->htqp;
1655
1656         for (i = 0; i < hdev->num_tqps; i++) {
1657                 tqp->dev = &hdev->pdev->dev;
1658                 tqp->index = i;
1659
1660                 tqp->q.ae_algo = &ae_algo;
1661                 tqp->q.buf_size = hdev->rx_buf_len;
1662                 tqp->q.tx_desc_num = hdev->num_tx_desc;
1663                 tqp->q.rx_desc_num = hdev->num_rx_desc;
1664
1665                 /* need an extended offset to configure queues >=
1666                  * HCLGE_TQP_MAX_SIZE_DEV_V2
1667                  */
1668                 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1669                         tqp->q.io_base = hdev->hw.io_base +
1670                                          HCLGE_TQP_REG_OFFSET +
1671                                          i * HCLGE_TQP_REG_SIZE;
1672                 else
1673                         tqp->q.io_base = hdev->hw.io_base +
1674                                          HCLGE_TQP_REG_OFFSET +
1675                                          HCLGE_TQP_EXT_REG_OFFSET +
1676                                          (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1677                                          HCLGE_TQP_REG_SIZE;
1678
1679                 tqp++;
1680         }
1681
1682         return 0;
1683 }
1684
1685 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1686                                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1687 {
1688         struct hclge_tqp_map_cmd *req;
1689         struct hclge_desc desc;
1690         int ret;
1691
1692         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1693
1694         req = (struct hclge_tqp_map_cmd *)desc.data;
1695         req->tqp_id = cpu_to_le16(tqp_pid);
1696         req->tqp_vf = func_id;
1697         req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
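        /* the MAP_TYPE bit is only set for VF (non-PF) queues */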
1698         if (!is_pf)
1699                 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1700         req->tqp_vid = cpu_to_le16(tqp_vid);
1701
1702         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1703         if (ret)
1704                 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1705
1706         return ret;
1707 }
1708
1709 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1710 {
1711         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1712         struct hclge_dev *hdev = vport->back;
1713         int i, alloced;
1714
1715         for (i = 0, alloced = 0; i < hdev->num_tqps &&
1716              alloced < num_tqps; i++) {
1717                 if (!hdev->htqp[i].alloced) {
1718                         hdev->htqp[i].q.handle = &vport->nic;
1719                         hdev->htqp[i].q.tqp_index = alloced;
1720                         hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1721                         hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1722                         kinfo->tqp[alloced] = &hdev->htqp[i].q;
1723                         hdev->htqp[i].alloced = true;
1724                         alloced++;
1725                 }
1726         }
1727         vport->alloc_tqps = alloced;
1728         kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1729                                 vport->alloc_tqps / hdev->tm_info.num_tc);
1730
1731         /* ensure a one-to-one mapping between irq and queue by default */
1732         kinfo->rss_size = min_t(u16, kinfo->rss_size,
1733                                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1734
1735         return 0;
1736 }
1737
1738 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1739                             u16 num_tx_desc, u16 num_rx_desc)
1740
1741 {
1742         struct hnae3_handle *nic = &vport->nic;
1743         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1744         struct hclge_dev *hdev = vport->back;
1745         int ret;
1746
1747         kinfo->num_tx_desc = num_tx_desc;
1748         kinfo->num_rx_desc = num_rx_desc;
1749
1750         kinfo->rx_buf_len = hdev->rx_buf_len;
1751         kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1752
1753         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1754                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1755         if (!kinfo->tqp)
1756                 return -ENOMEM;
1757
1758         ret = hclge_assign_tqp(vport, num_tqps);
1759         if (ret)
1760                 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1761
1762         return ret;
1763 }
1764
1765 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1766                                   struct hclge_vport *vport)
1767 {
1768         struct hnae3_handle *nic = &vport->nic;
1769         struct hnae3_knic_private_info *kinfo;
1770         u16 i;
1771
1772         kinfo = &nic->kinfo;
1773         for (i = 0; i < vport->alloc_tqps; i++) {
1774                 struct hclge_tqp *q =
1775                         container_of(kinfo->tqp[i], struct hclge_tqp, q);
1776                 bool is_pf;
1777                 int ret;
1778
1779                 is_pf = !(vport->vport_id);
1780                 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1781                                              i, is_pf);
1782                 if (ret)
1783                         return ret;
1784         }
1785
1786         return 0;
1787 }
1788
1789 static int hclge_map_tqp(struct hclge_dev *hdev)
1790 {
1791         struct hclge_vport *vport = hdev->vport;
1792         u16 i, num_vport;
1793
1794         num_vport = hdev->num_req_vfs + 1;
1795         for (i = 0; i < num_vport; i++) {
1796                 int ret;
1797
1798                 ret = hclge_map_tqp_to_vport(hdev, vport);
1799                 if (ret)
1800                         return ret;
1801
1802                 vport++;
1803         }
1804
1805         return 0;
1806 }
1807
1808 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1809 {
1810         struct hnae3_handle *nic = &vport->nic;
1811         struct hclge_dev *hdev = vport->back;
1812         int ret;
1813
1814         nic->pdev = hdev->pdev;
1815         nic->ae_algo = &ae_algo;
1816         nic->numa_node_mask = hdev->numa_node_mask;
1817         nic->kinfo.io_base = hdev->hw.io_base;
1818
1819         ret = hclge_knic_setup(vport, num_tqps,
1820                                hdev->num_tx_desc, hdev->num_rx_desc);
1821         if (ret)
1822                 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1823
1824         return ret;
1825 }
1826
1827 static int hclge_alloc_vport(struct hclge_dev *hdev)
1828 {
1829         struct pci_dev *pdev = hdev->pdev;
1830         struct hclge_vport *vport;
1831         u32 tqp_main_vport;
1832         u32 tqp_per_vport;
1833         int num_vport, i;
1834         int ret;
1835
1836         /* We need to alloc a vport for the main NIC of the PF */
1837         num_vport = hdev->num_req_vfs + 1;
1838
1839         if (hdev->num_tqps < num_vport) {
1840                 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1841                         hdev->num_tqps, num_vport);
1842                 return -EINVAL;
1843         }
1844
1845         /* Alloc the same number of TQPs for every vport */
1846         tqp_per_vport = hdev->num_tqps / num_vport;
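        /* the PF's main vport additionally takes any remainder TQPs */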
1847         tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1848
1849         vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1850                              GFP_KERNEL);
1851         if (!vport)
1852                 return -ENOMEM;
1853
1854         hdev->vport = vport;
1855         hdev->num_alloc_vport = num_vport;
1856
1857         if (IS_ENABLED(CONFIG_PCI_IOV))
1858                 hdev->num_alloc_vfs = hdev->num_req_vfs;
1859
1860         for (i = 0; i < num_vport; i++) {
1861                 vport->back = hdev;
1862                 vport->vport_id = i;
1863                 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1864                 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1865                 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1866                 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1867                 vport->req_vlan_fltr_en = true;
1868                 INIT_LIST_HEAD(&vport->vlan_list);
1869                 INIT_LIST_HEAD(&vport->uc_mac_list);
1870                 INIT_LIST_HEAD(&vport->mc_mac_list);
1871                 spin_lock_init(&vport->mac_list_lock);
1872
1873                 if (i == 0)
1874                         ret = hclge_vport_setup(vport, tqp_main_vport);
1875                 else
1876                         ret = hclge_vport_setup(vport, tqp_per_vport);
1877                 if (ret) {
1878                         dev_err(&pdev->dev,
1879                                 "vport setup failed for vport %d, %d\n",
1880                                 i, ret);
1881                         return ret;
1882                 }
1883
1884                 vport++;
1885         }
1886
1887         return 0;
1888 }
1889
1890 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1891                                     struct hclge_pkt_buf_alloc *buf_alloc)
1892 {
1893 /* TX buffer size is in units of 128 bytes */
1894 #define HCLGE_BUF_SIZE_UNIT_SHIFT       7
1895 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1896         struct hclge_tx_buff_alloc_cmd *req;
1897         struct hclge_desc desc;
1898         int ret;
1899         u8 i;
1900
1901         req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1902
1903         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1904         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1905                 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1906
1907                 req->tx_pkt_buff[i] =
1908                         cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1909                                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1910         }
1911
1912         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1913         if (ret)
1914                 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1915                         ret);
1916
1917         return ret;
1918 }
1919
1920 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1921                                  struct hclge_pkt_buf_alloc *buf_alloc)
1922 {
1923         int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1924
1925         if (ret)
1926                 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1927
1928         return ret;
1929 }
1930
1931 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1932 {
1933         unsigned int i;
1934         u32 cnt = 0;
1935
1936         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1937                 if (hdev->hw_tc_map & BIT(i))
1938                         cnt++;
1939         return cnt;
1940 }
1941
1942 /* Get the number of PFC-enabled TCs that have a private buffer */
1943 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1944                                   struct hclge_pkt_buf_alloc *buf_alloc)
1945 {
1946         struct hclge_priv_buf *priv;
1947         unsigned int i;
1948         int cnt = 0;
1949
1950         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1951                 priv = &buf_alloc->priv_buf[i];
1952                 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1953                     priv->enable)
1954                         cnt++;
1955         }
1956
1957         return cnt;
1958 }
1959
1960 /* Get the number of PFC-disabled TCs that have a private buffer */
1961 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1962                                      struct hclge_pkt_buf_alloc *buf_alloc)
1963 {
1964         struct hclge_priv_buf *priv;
1965         unsigned int i;
1966         int cnt = 0;
1967
1968         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1969                 priv = &buf_alloc->priv_buf[i];
1970                 if (hdev->hw_tc_map & BIT(i) &&
1971                     !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1972                     priv->enable)
1973                         cnt++;
1974         }
1975
1976         return cnt;
1977 }
1978
1979 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1980 {
1981         struct hclge_priv_buf *priv;
1982         u32 rx_priv = 0;
1983         int i;
1984
1985         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1986                 priv = &buf_alloc->priv_buf[i];
1987                 if (priv->enable)
1988                         rx_priv += priv->buf_size;
1989         }
1990         return rx_priv;
1991 }
1992
1993 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1994 {
1995         u32 i, total_tx_size = 0;
1996
1997         for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1998                 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1999
2000         return total_tx_size;
2001 }
2002
2003 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2004                                 struct hclge_pkt_buf_alloc *buf_alloc,
2005                                 u32 rx_all)
2006 {
2007         u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2008         u32 tc_num = hclge_get_tc_num(hdev);
2009         u32 shared_buf, aligned_mps;
2010         u32 rx_priv;
2011         int i;
2012
2013         aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2014
2015         if (hnae3_dev_dcb_supported(hdev))
2016                 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2017                                         hdev->dv_buf_size;
2018         else
2019                 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2020                                         + hdev->dv_buf_size;
2021
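        /* reserve one aligned MPS of shared buffer per TC plus one extra */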
2022         shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2023         shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2024                              HCLGE_BUF_SIZE_UNIT);
2025
2026         rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2027         if (rx_all < rx_priv + shared_std)
2028                 return false;
2029
2030         shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2031         buf_alloc->s_buf.buf_size = shared_buf;
2032         if (hnae3_dev_dcb_supported(hdev)) {
2033                 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2034                 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2035                         - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2036                                   HCLGE_BUF_SIZE_UNIT);
2037         } else {
2038                 buf_alloc->s_buf.self.high = aligned_mps +
2039                                                 HCLGE_NON_DCB_ADDITIONAL_BUF;
2040                 buf_alloc->s_buf.self.low = aligned_mps;
2041         }
2042
2043         if (hnae3_dev_dcb_supported(hdev)) {
2044                 hi_thrd = shared_buf - hdev->dv_buf_size;
2045
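                /* scale the threshold down to keep some headroom when only a
                 * few TCs are enabled
                 */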
2046                 if (tc_num <= NEED_RESERVE_TC_NUM)
2047                         hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2048                                         / BUF_MAX_PERCENT;
2049
2050                 if (tc_num)
2051                         hi_thrd = hi_thrd / tc_num;
2052
2053                 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2054                 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2055                 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2056         } else {
2057                 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2058                 lo_thrd = aligned_mps;
2059         }
2060
2061         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2062                 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2063                 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2064         }
2065
2066         return true;
2067 }
2068
2069 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2070                                 struct hclge_pkt_buf_alloc *buf_alloc)
2071 {
2072         u32 i, total_size;
2073
2074         total_size = hdev->pkt_buf_size;
2075
2076         /* alloc tx buffers for all enabled TCs */
2077         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2078                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2079
2080                 if (hdev->hw_tc_map & BIT(i)) {
2081                         if (total_size < hdev->tx_buf_size)
2082                                 return -ENOMEM;
2083
2084                         priv->tx_buf_size = hdev->tx_buf_size;
2085                 } else {
2086                         priv->tx_buf_size = 0;
2087                 }
2088
2089                 total_size -= priv->tx_buf_size;
2090         }
2091
2092         return 0;
2093 }
2094
2095 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2096                                   struct hclge_pkt_buf_alloc *buf_alloc)
2097 {
2098         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2099         u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2100         unsigned int i;
2101
2102         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2103                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2104
2105                 priv->enable = 0;
2106                 priv->wl.low = 0;
2107                 priv->wl.high = 0;
2108                 priv->buf_size = 0;
2109
2110                 if (!(hdev->hw_tc_map & BIT(i)))
2111                         continue;
2112
2113                 priv->enable = 1;
2114
2115                 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2116                         priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2117                         priv->wl.high = roundup(priv->wl.low + aligned_mps,
2118                                                 HCLGE_BUF_SIZE_UNIT);
2119                 } else {
2120                         priv->wl.low = 0;
2121                         priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2122                                         aligned_mps;
2123                 }
2124
2125                 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2126         }
2127
2128         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2129 }
2130
2131 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2132                                           struct hclge_pkt_buf_alloc *buf_alloc)
2133 {
2134         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2135         int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2136         int i;
2137
2138         /* clear starting from the last TC */
2139         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2140                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2141                 unsigned int mask = BIT((unsigned int)i);
2142
2143                 if (hdev->hw_tc_map & mask &&
2144                     !(hdev->tm_info.hw_pfc_map & mask)) {
2145                         /* Clear this non-PFC TC's private buffer */
2146                         priv->wl.low = 0;
2147                         priv->wl.high = 0;
2148                         priv->buf_size = 0;
2149                         priv->enable = 0;
2150                         no_pfc_priv_num--;
2151                 }
2152
2153                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2154                     no_pfc_priv_num == 0)
2155                         break;
2156         }
2157
2158         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2159 }
2160
2161 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2162                                         struct hclge_pkt_buf_alloc *buf_alloc)
2163 {
2164         u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2165         int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2166         int i;
2167
2168         /* clear starting from the last TC */
2169         for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2170                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2171                 unsigned int mask = BIT((unsigned int)i);
2172
2173                 if (hdev->hw_tc_map & mask &&
2174                     hdev->tm_info.hw_pfc_map & mask) {
2175                         /* Reduce the number of PFC TCs with a private buffer */
2176                         priv->wl.low = 0;
2177                         priv->enable = 0;
2178                         priv->wl.high = 0;
2179                         priv->buf_size = 0;
2180                         pfc_priv_num--;
2181                 }
2182
2183                 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2184                     pfc_priv_num == 0)
2185                         break;
2186         }
2187
2188         return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2189 }
2190
2191 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2192                                       struct hclge_pkt_buf_alloc *buf_alloc)
2193 {
2194 #define COMPENSATE_BUFFER       0x3C00
2195 #define COMPENSATE_HALF_MPS_NUM 5
2196 #define PRIV_WL_GAP             0x1800
2197
2198         u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2199         u32 tc_num = hclge_get_tc_num(hdev);
2200         u32 half_mps = hdev->mps >> 1;
2201         u32 min_rx_priv;
2202         unsigned int i;
2203
2204         if (tc_num)
2205                 rx_priv = rx_priv / tc_num;
2206
2207         if (tc_num <= NEED_RESERVE_TC_NUM)
2208                 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2209
2210         min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2211                         COMPENSATE_HALF_MPS_NUM * half_mps;
2212         min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2213         rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2214         if (rx_priv < min_rx_priv)
2215                 return false;
2216
2217         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2218                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2219
2220                 priv->enable = 0;
2221                 priv->wl.low = 0;
2222                 priv->wl.high = 0;
2223                 priv->buf_size = 0;
2224
2225                 if (!(hdev->hw_tc_map & BIT(i)))
2226                         continue;
2227
2228                 priv->enable = 1;
2229                 priv->buf_size = rx_priv;
2230                 priv->wl.high = rx_priv - hdev->dv_buf_size;
2231                 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2232         }
2233
2234         buf_alloc->s_buf.buf_size = 0;
2235
2236         return true;
2237 }
2238
2239 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2240  * @hdev: pointer to struct hclge_dev
2241  * @buf_alloc: pointer to buffer calculation data
2242  * @return: 0: calculation successful, negative: fail
2243  */
2244 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2245                                 struct hclge_pkt_buf_alloc *buf_alloc)
2246 {
2247         /* When DCB is not supported, rx private buffer is not allocated. */
2248         if (!hnae3_dev_dcb_supported(hdev)) {
2249                 u32 rx_all = hdev->pkt_buf_size;
2250
2251                 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2252                 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2253                         return -ENOMEM;
2254
2255                 return 0;
2256         }
2257
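        /* prefer a private-buffer-only layout, then fall back to layouts
         * with smaller or fewer private buffers
         */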
2258         if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2259                 return 0;
2260
2261         if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2262                 return 0;
2263
2264         /* try to decrease the buffer size */
2265         if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2266                 return 0;
2267
2268         if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2269                 return 0;
2270
2271         if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2272                 return 0;
2273
2274         return -ENOMEM;
2275 }
2276
2277 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2278                                    struct hclge_pkt_buf_alloc *buf_alloc)
2279 {
2280         struct hclge_rx_priv_buff_cmd *req;
2281         struct hclge_desc desc;
2282         int ret;
2283         int i;
2284
2285         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2286         req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2287
2288         /* Alloc private buffers for the TCs */
2289         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2290                 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2291
2292                 req->buf_num[i] =
2293                         cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2294                 req->buf_num[i] |=
2295                         cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2296         }
2297
2298         req->shared_buf =
2299                 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2300                             (1 << HCLGE_TC0_PRI_BUF_EN_B));
2301
2302         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2303         if (ret)
2304                 dev_err(&hdev->pdev->dev,
2305                         "rx private buffer alloc cmd failed %d\n", ret);
2306
2307         return ret;
2308 }
2309
2310 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2311                                    struct hclge_pkt_buf_alloc *buf_alloc)
2312 {
2313         struct hclge_rx_priv_wl_buf *req;
2314         struct hclge_priv_buf *priv;
2315         struct hclge_desc desc[2];
2316         int i, j;
2317         int ret;
2318
2319         for (i = 0; i < 2; i++) {
2320                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2321                                            false);
2322                 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2323
2324                 /* The first descriptor sets the NEXT bit to 1 */
2325                 if (i == 0)
2326                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2327                 else
2328                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2329
2330                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2331                         u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2332
2333                         priv = &buf_alloc->priv_buf[idx];
2334                         req->tc_wl[j].high =
2335                                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2336                         req->tc_wl[j].high |=
2337                                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2338                         req->tc_wl[j].low =
2339                                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2340                         req->tc_wl[j].low |=
2341                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2342                 }
2343         }
2344
2345         /* Send 2 descriptors at one time */
2346         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2347         if (ret)
2348                 dev_err(&hdev->pdev->dev,
2349                         "rx private waterline config cmd failed %d\n",
2350                         ret);
2351         return ret;
2352 }
2353
2354 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2355                                     struct hclge_pkt_buf_alloc *buf_alloc)
2356 {
2357         struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2358         struct hclge_rx_com_thrd *req;
2359         struct hclge_desc desc[2];
2360         struct hclge_tc_thrd *tc;
2361         int i, j;
2362         int ret;
2363
2364         for (i = 0; i < 2; i++) {
2365                 hclge_cmd_setup_basic_desc(&desc[i],
2366                                            HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2367                 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2368
2369                 /* The first descriptor sets the NEXT bit to 1 */
2370                 if (i == 0)
2371                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2372                 else
2373                         desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2374
2375                 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2376                         tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2377
2378                         req->com_thrd[j].high =
2379                                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2380                         req->com_thrd[j].high |=
2381                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2382                         req->com_thrd[j].low =
2383                                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2384                         req->com_thrd[j].low |=
2385                                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2386                 }
2387         }
2388
2389         /* Send 2 descriptors at one time */
2390         ret = hclge_cmd_send(&hdev->hw, desc, 2);
2391         if (ret)
2392                 dev_err(&hdev->pdev->dev,
2393                         "common threshold config cmd failed %d\n", ret);
2394         return ret;
2395 }
2396
2397 static int hclge_common_wl_config(struct hclge_dev *hdev,
2398                                   struct hclge_pkt_buf_alloc *buf_alloc)
2399 {
2400         struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2401         struct hclge_rx_com_wl *req;
2402         struct hclge_desc desc;
2403         int ret;
2404
2405         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2406
2407         req = (struct hclge_rx_com_wl *)desc.data;
2408         req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2409         req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2410
2411         req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2412         req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2413
2414         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2415         if (ret)
2416                 dev_err(&hdev->pdev->dev,
2417                         "common waterline config cmd failed %d\n", ret);
2418
2419         return ret;
2420 }
2421
2422 int hclge_buffer_alloc(struct hclge_dev *hdev)
2423 {
2424         struct hclge_pkt_buf_alloc *pkt_buf;
2425         int ret;
2426
2427         pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2428         if (!pkt_buf)
2429                 return -ENOMEM;
2430
2431         ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2432         if (ret) {
2433                 dev_err(&hdev->pdev->dev,
2434                         "could not calc tx buffer size for all TCs %d\n", ret);
2435                 goto out;
2436         }
2437
2438         ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2439         if (ret) {
2440                 dev_err(&hdev->pdev->dev,
2441                         "could not alloc tx buffers %d\n", ret);
2442                 goto out;
2443         }
2444
2445         ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2446         if (ret) {
2447                 dev_err(&hdev->pdev->dev,
2448                         "could not calc rx priv buffer size for all TCs %d\n",
2449                         ret);
2450                 goto out;
2451         }
2452
2453         ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2454         if (ret) {
2455                 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2456                         ret);
2457                 goto out;
2458         }
2459
2460         if (hnae3_dev_dcb_supported(hdev)) {
2461                 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2462                 if (ret) {
2463                         dev_err(&hdev->pdev->dev,
2464                                 "could not configure rx private waterline %d\n",
2465                                 ret);
2466                         goto out;
2467                 }
2468
2469                 ret = hclge_common_thrd_config(hdev, pkt_buf);
2470                 if (ret) {
2471                         dev_err(&hdev->pdev->dev,
2472                                 "could not configure common threshold %d\n",
2473                                 ret);
2474                         goto out;
2475                 }
2476         }
2477
2478         ret = hclge_common_wl_config(hdev, pkt_buf);
2479         if (ret)
2480                 dev_err(&hdev->pdev->dev,
2481                         "could not configure common waterline %d\n", ret);
2482
2483 out:
2484         kfree(pkt_buf);
2485         return ret;
2486 }
2487
2488 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2489 {
2490         struct hnae3_handle *roce = &vport->roce;
2491         struct hnae3_handle *nic = &vport->nic;
2492         struct hclge_dev *hdev = vport->back;
2493
2494         roce->rinfo.num_vectors = vport->back->num_roce_msi;
2495
2496         if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2497                 return -EINVAL;
2498
2499         roce->rinfo.base_vector = hdev->roce_base_vector;
2500
2501         roce->rinfo.netdev = nic->kinfo.netdev;
2502         roce->rinfo.roce_io_base = hdev->hw.io_base;
2503         roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2504
2505         roce->pdev = nic->pdev;
2506         roce->ae_algo = nic->ae_algo;
2507         roce->numa_node_mask = nic->numa_node_mask;
2508
2509         return 0;
2510 }
2511
2512 static int hclge_init_msi(struct hclge_dev *hdev)
2513 {
2514         struct pci_dev *pdev = hdev->pdev;
2515         int vectors;
2516         int i;
2517
2518         vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2519                                         hdev->num_msi,
2520                                         PCI_IRQ_MSI | PCI_IRQ_MSIX);
2521         if (vectors < 0) {
2522                 dev_err(&pdev->dev,
2523                         "failed(%d) to allocate MSI/MSI-X vectors\n",
2524                         vectors);
2525                 return vectors;
2526         }
2527         if (vectors < hdev->num_msi)
2528                 dev_warn(&hdev->pdev->dev,
2529                          "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2530                          hdev->num_msi, vectors);
2531
2532         hdev->num_msi = vectors;
2533         hdev->num_msi_left = vectors;
2534
2535         hdev->base_msi_vector = pdev->irq;
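        /* RoCE vectors start right after the vectors reserved for the NIC */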
2536         hdev->roce_base_vector = hdev->base_msi_vector +
2537                                 hdev->num_nic_msi;
2538
2539         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2540                                            sizeof(u16), GFP_KERNEL);
2541         if (!hdev->vector_status) {
2542                 pci_free_irq_vectors(pdev);
2543                 return -ENOMEM;
2544         }
2545
2546         for (i = 0; i < hdev->num_msi; i++)
2547                 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2548
2549         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2550                                         sizeof(int), GFP_KERNEL);
2551         if (!hdev->vector_irq) {
2552                 pci_free_irq_vectors(pdev);
2553                 return -ENOMEM;
2554         }
2555
2556         return 0;
2557 }
2558
2559 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2560 {
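        /* only 10M and 100M links can run half duplex; force full duplex
         * for all other speeds
         */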
2561         if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2562                 duplex = HCLGE_MAC_FULL;
2563
2564         return duplex;
2565 }
2566
2567 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2568                                       u8 duplex)
2569 {
2570         struct hclge_config_mac_speed_dup_cmd *req;
2571         struct hclge_desc desc;
2572         int ret;
2573
2574         req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2575
2576         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2577
2578         if (duplex)
2579                 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2580
2581         switch (speed) {
2582         case HCLGE_MAC_SPEED_10M:
2583                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2584                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2585                 break;
2586         case HCLGE_MAC_SPEED_100M:
2587                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2588                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2589                 break;
2590         case HCLGE_MAC_SPEED_1G:
2591                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2592                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2593                 break;
2594         case HCLGE_MAC_SPEED_10G:
2595                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2596                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2597                 break;
2598         case HCLGE_MAC_SPEED_25G:
2599                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2600                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2601                 break;
2602         case HCLGE_MAC_SPEED_40G:
2603                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2604                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2605                 break;
2606         case HCLGE_MAC_SPEED_50G:
2607                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2608                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2609                 break;
2610         case HCLGE_MAC_SPEED_100G:
2611                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2612                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2613                 break;
2614         case HCLGE_MAC_SPEED_200G:
2615                 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2616                                 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2617                 break;
2618         default:
2619                 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2620                 return -EINVAL;
2621         }
2622
2623         hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2624                       1);
2625
2626         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2627         if (ret) {
2628                 dev_err(&hdev->pdev->dev,
2629                         "mac speed/duplex config cmd failed %d.\n", ret);
2630                 return ret;
2631         }
2632
2633         return 0;
2634 }
2635
2636 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2637 {
2638         struct hclge_mac *mac = &hdev->hw.mac;
2639         int ret;
2640
2641         duplex = hclge_check_speed_dup(duplex, speed);
2642         if (!mac->support_autoneg && mac->speed == speed &&
2643             mac->duplex == duplex)
2644                 return 0;
2645
2646         ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2647         if (ret)
2648                 return ret;
2649
2650         hdev->hw.mac.speed = speed;
2651         hdev->hw.mac.duplex = duplex;
2652
2653         return 0;
2654 }
2655
2656 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2657                                      u8 duplex)
2658 {
2659         struct hclge_vport *vport = hclge_get_vport(handle);
2660         struct hclge_dev *hdev = vport->back;
2661
2662         return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2663 }
2664
2665 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2666 {
2667         struct hclge_config_auto_neg_cmd *req;
2668         struct hclge_desc desc;
2669         u32 flag = 0;
2670         int ret;
2671
2672         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2673
2674         req = (struct hclge_config_auto_neg_cmd *)desc.data;
2675         if (enable)
2676                 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2677         req->cfg_an_cmd_flag = cpu_to_le32(flag);
2678
2679         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2680         if (ret)
2681                 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2682                         ret);
2683
2684         return ret;
2685 }
2686
2687 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2688 {
2689         struct hclge_vport *vport = hclge_get_vport(handle);
2690         struct hclge_dev *hdev = vport->back;
2691
2692         if (!hdev->hw.mac.support_autoneg) {
2693                 if (enable) {
2694                         dev_err(&hdev->pdev->dev,
2695                                 "autoneg is not supported by current port\n");
2696                         return -EOPNOTSUPP;
2697                 } else {
2698                         return 0;
2699                 }
2700         }
2701
2702         return hclge_set_autoneg_en(hdev, enable);
2703 }
2704
2705 static int hclge_get_autoneg(struct hnae3_handle *handle)
2706 {
2707         struct hclge_vport *vport = hclge_get_vport(handle);
2708         struct hclge_dev *hdev = vport->back;
2709         struct phy_device *phydev = hdev->hw.mac.phydev;
2710
2711         if (phydev)
2712                 return phydev->autoneg;
2713
2714         return hdev->hw.mac.autoneg;
2715 }
2716
2717 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2718 {
2719         struct hclge_vport *vport = hclge_get_vport(handle);
2720         struct hclge_dev *hdev = vport->back;
2721         int ret;
2722
2723         dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2724
2725         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2726         if (ret)
2727                 return ret;
2728         return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2729 }
2730
2731 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2732 {
2733         struct hclge_vport *vport = hclge_get_vport(handle);
2734         struct hclge_dev *hdev = vport->back;
2735
2736         if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2737                 return hclge_set_autoneg_en(hdev, !halt);
2738
2739         return 0;
2740 }
2741
2742 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2743 {
2744         struct hclge_config_fec_cmd *req;
2745         struct hclge_desc desc;
2746         int ret;
2747
2748         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2749
2750         req = (struct hclge_config_fec_cmd *)desc.data;
2751         if (fec_mode & BIT(HNAE3_FEC_AUTO))
2752                 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2753         if (fec_mode & BIT(HNAE3_FEC_RS))
2754                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2755                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2756         if (fec_mode & BIT(HNAE3_FEC_BASER))
2757                 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2758                                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2759
2760         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2761         if (ret)
2762                 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2763
2764         return ret;
2765 }
2766
2767 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2768 {
2769         struct hclge_vport *vport = hclge_get_vport(handle);
2770         struct hclge_dev *hdev = vport->back;
2771         struct hclge_mac *mac = &hdev->hw.mac;
2772         int ret;
2773
2774         if (fec_mode && !(mac->fec_ability & fec_mode)) {
2775                 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2776                 return -EINVAL;
2777         }
2778
2779         ret = hclge_set_fec_hw(hdev, fec_mode);
2780         if (ret)
2781                 return ret;
2782
2783         mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2784         return 0;
2785 }
2786
2787 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2788                           u8 *fec_mode)
2789 {
2790         struct hclge_vport *vport = hclge_get_vport(handle);
2791         struct hclge_dev *hdev = vport->back;
2792         struct hclge_mac *mac = &hdev->hw.mac;
2793
2794         if (fec_ability)
2795                 *fec_ability = mac->fec_ability;
2796         if (fec_mode)
2797                 *fec_mode = mac->fec_mode;
2798 }
2799
2800 static int hclge_mac_init(struct hclge_dev *hdev)
2801 {
2802         struct hclge_mac *mac = &hdev->hw.mac;
2803         int ret;
2804
2805         hdev->support_sfp_query = true;
2806         hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2807         ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2808                                          hdev->hw.mac.duplex);
2809         if (ret)
2810                 return ret;
2811
2812         if (hdev->hw.mac.support_autoneg) {
2813                 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2814                 if (ret)
2815                         return ret;
2816         }
2817
2818         mac->link = 0;
2819
2820         if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2821                 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2822                 if (ret)
2823                         return ret;
2824         }
2825
2826         ret = hclge_set_mac_mtu(hdev, hdev->mps);
2827         if (ret) {
2828                 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2829                 return ret;
2830         }
2831
2832         ret = hclge_set_default_loopback(hdev);
2833         if (ret)
2834                 return ret;
2835
2836         ret = hclge_buffer_alloc(hdev);
2837         if (ret)
2838                 dev_err(&hdev->pdev->dev,
2839                         "allocate buffer fail, ret=%d\n", ret);
2840
2841         return ret;
2842 }
2843
2844 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2845 {
2846         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2847             !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2848                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2849                                     hclge_wq, &hdev->service_task, 0);
2850 }
2851
2852 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2853 {
2854         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2855             !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2856                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2857                                     hclge_wq, &hdev->service_task, 0);
2858 }
2859
2860 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2861 {
2862         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2863             !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2864                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2865                                     hclge_wq, &hdev->service_task, 0);
2866 }
2867
2868 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2869 {
2870         if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2871             !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2872                 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2873                                     hclge_wq, &hdev->service_task,
2874                                     delay_time);
2875 }
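
/* Editor's sketch (user-space analogue, illustration only, not driver code):
 * the coalescing idea behind the three schedule helpers above. They rely on
 * test_and_set_bit() so that, however many events arrive, at most one service
 * request is outstanding until the service task clears the bit again. The
 * same idea with C11 atomics; all names are made up for the sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool mbx_sched_pending;

static void schedule_mbx_service(void)
{
        /* only the caller that flips the flag from false to true queues work;
         * later callers are no-ops until the service task clears the flag
         */
        if (!atomic_exchange(&mbx_sched_pending, true))
                printf("service task queued\n");
}

int main(void)
{
        schedule_mbx_service();                  /* queues the task */
        schedule_mbx_service();                  /* coalesced, no second queue */
        atomic_store(&mbx_sched_pending, false); /* service task finished */
        schedule_mbx_service();                  /* queues again */
        return 0;
}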
2876
2877 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2878 {
2879         struct hclge_link_status_cmd *req;
2880         struct hclge_desc desc;
2881         int ret;
2882
2883         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2884         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2885         if (ret) {
2886                 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2887                         ret);
2888                 return ret;
2889         }
2890
2891         req = (struct hclge_link_status_cmd *)desc.data;
2892         *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2893                 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2894
2895         return 0;
2896 }
2897
2898 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2899 {
2900         struct phy_device *phydev = hdev->hw.mac.phydev;
2901
2902         *link_status = HCLGE_LINK_STATUS_DOWN;
2903
2904         if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2905                 return 0;
2906
2907         if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2908                 return 0;
2909
2910         return hclge_get_mac_link_status(hdev, link_status);
2911 }
2912
2913 static void hclge_push_link_status(struct hclge_dev *hdev)
2914 {
2915         struct hclge_vport *vport;
2916         int ret;
2917         u16 i;
2918
2919         for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2920                 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2921
2922                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2923                     vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2924                         continue;
2925
2926                 ret = hclge_push_vf_link_status(vport);
2927                 if (ret) {
2928                         dev_err(&hdev->pdev->dev,
2929                                 "failed to push link status to vf%u, ret = %d\n",
2930                                 i, ret);
2931                 }
2932         }
2933 }
2934
2935 static void hclge_update_link_status(struct hclge_dev *hdev)
2936 {
2937         struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2938         struct hnae3_handle *handle = &hdev->vport[0].nic;
2939         struct hnae3_client *rclient = hdev->roce_client;
2940         struct hnae3_client *client = hdev->nic_client;
2941         int state;
2942         int ret;
2943
2944         if (!client)
2945                 return;
2946
2947         if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2948                 return;
2949
2950         ret = hclge_get_mac_phy_link(hdev, &state);
2951         if (ret) {
2952                 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2953                 return;
2954         }
2955
2956         if (state != hdev->hw.mac.link) {
2957                 hdev->hw.mac.link = state;
2958                 client->ops->link_status_change(handle, state);
2959                 hclge_config_mac_tnl_int(hdev, state);
2960                 if (rclient && rclient->ops->link_status_change)
2961                         rclient->ops->link_status_change(rhandle, state);
2962
2963                 hclge_push_link_status(hdev);
2964         }
2965
2966         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2967 }
2968
2969 static void hclge_update_port_capability(struct hclge_dev *hdev,
2970                                          struct hclge_mac *mac)
2971 {
2972         if (hnae3_dev_fec_supported(hdev))
2973                 /* update fec ability by speed */
2974                 hclge_convert_setting_fec(mac);
2975
2976         /* firmware cannot identify the backplane type; the media type
2977          * read from the configuration can help to deal with it
2978          */
2979         if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2980             mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2981                 mac->module_type = HNAE3_MODULE_TYPE_KR;
2982         else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2983                 mac->module_type = HNAE3_MODULE_TYPE_TP;
2984
2985         if (mac->support_autoneg) {
2986                 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2987                 linkmode_copy(mac->advertising, mac->supported);
2988         } else {
2989                 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2990                                    mac->supported);
2991                 linkmode_zero(mac->advertising);
2992         }
2993 }
2994
2995 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2996 {
2997         struct hclge_sfp_info_cmd *resp;
2998         struct hclge_desc desc;
2999         int ret;
3000
3001         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3002         resp = (struct hclge_sfp_info_cmd *)desc.data;
3003         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3004         if (ret == -EOPNOTSUPP) {
3005                 dev_warn(&hdev->pdev->dev,
3006                          "IMP does not support get SFP speed %d\n", ret);
3007                 return ret;
3008         } else if (ret) {
3009                 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3010                 return ret;
3011         }
3012
3013         *speed = le32_to_cpu(resp->speed);
3014
3015         return 0;
3016 }
3017
3018 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3019 {
3020         struct hclge_sfp_info_cmd *resp;
3021         struct hclge_desc desc;
3022         int ret;
3023
3024         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3025         resp = (struct hclge_sfp_info_cmd *)desc.data;
3026
3027         resp->query_type = QUERY_ACTIVE_SPEED;
3028
3029         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3030         if (ret == -EOPNOTSUPP) {
3031                 dev_warn(&hdev->pdev->dev,
3032                          "IMP does not support get SFP info %d\n", ret);
3033                 return ret;
3034         } else if (ret) {
3035                 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3036                 return ret;
3037         }
3038
3039         /* In some cases, the MAC speed obtained from the IMP may be 0;
3040          * it shouldn't be written to mac->speed.
3041          */
3042         if (!le32_to_cpu(resp->speed))
3043                 return 0;
3044
3045         mac->speed = le32_to_cpu(resp->speed);
3046         /* if resp->speed_ability is 0, it means it is an old firmware
3047          * version; do not update these params
3048          */
3049         if (resp->speed_ability) {
3050                 mac->module_type = le32_to_cpu(resp->module_type);
3051                 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3052                 mac->autoneg = resp->autoneg;
3053                 mac->support_autoneg = resp->autoneg_ability;
3054                 mac->speed_type = QUERY_ACTIVE_SPEED;
3055                 if (!resp->active_fec)
3056                         mac->fec_mode = 0;
3057                 else
3058                         mac->fec_mode = BIT(resp->active_fec);
3059         } else {
3060                 mac->speed_type = QUERY_SFP_SPEED;
3061         }
3062
3063         return 0;
3064 }
3065
3066 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3067                                         struct ethtool_link_ksettings *cmd)
3068 {
3069         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3070         struct hclge_vport *vport = hclge_get_vport(handle);
3071         struct hclge_phy_link_ksetting_0_cmd *req0;
3072         struct hclge_phy_link_ksetting_1_cmd *req1;
3073         u32 supported, advertising, lp_advertising;
3074         struct hclge_dev *hdev = vport->back;
3075         int ret;
3076
3077         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3078                                    true);
3079         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3080         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3081                                    true);
3082
3083         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3084         if (ret) {
3085                 dev_err(&hdev->pdev->dev,
3086                         "failed to get phy link ksetting, ret = %d.\n", ret);
3087                 return ret;
3088         }
3089
3090         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3091         cmd->base.autoneg = req0->autoneg;
3092         cmd->base.speed = le32_to_cpu(req0->speed);
3093         cmd->base.duplex = req0->duplex;
3094         cmd->base.port = req0->port;
3095         cmd->base.transceiver = req0->transceiver;
3096         cmd->base.phy_address = req0->phy_address;
3097         cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3098         cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3099         supported = le32_to_cpu(req0->supported);
3100         advertising = le32_to_cpu(req0->advertising);
3101         lp_advertising = le32_to_cpu(req0->lp_advertising);
3102         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3103                                                 supported);
3104         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3105                                                 advertising);
3106         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3107                                                 lp_advertising);
3108
3109         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3110         cmd->base.master_slave_cfg = req1->master_slave_cfg;
3111         cmd->base.master_slave_state = req1->master_slave_state;
3112
3113         return 0;
3114 }
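
/* Editor's sketch (stand-alone C, illustration only, not driver code): the
 * descriptor-chaining convention used above, where a command that spans
 * several buffer descriptors sets a "next" flag on every descriptor except
 * the last so the firmware knows they belong to one request (compare
 * HCLGE_CMD_FLAG_NEXT). All names are made up for the sketch.
 */
#include <stdio.h>

#define DESC_FLAG_NEXT  0x0001

struct demo_desc {
        unsigned short flag;
        unsigned int data[6];
};

static void chain_descs(struct demo_desc *desc, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                desc[i].flag = 0;
                if (i < num - 1)
                        desc[i].flag |= DESC_FLAG_NEXT; /* more BDs follow */
        }
}

int main(void)
{
        struct demo_desc desc[2];

        chain_descs(desc, 2);
        printf("desc[0].flag=%#x desc[1].flag=%#x\n",
               desc[0].flag, desc[1].flag);
        return 0;
}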
3115
3116 static int
3117 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3118                              const struct ethtool_link_ksettings *cmd)
3119 {
3120         struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3121         struct hclge_vport *vport = hclge_get_vport(handle);
3122         struct hclge_phy_link_ksetting_0_cmd *req0;
3123         struct hclge_phy_link_ksetting_1_cmd *req1;
3124         struct hclge_dev *hdev = vport->back;
3125         u32 advertising;
3126         int ret;
3127
3128         if (cmd->base.autoneg == AUTONEG_DISABLE &&
3129             ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3130              (cmd->base.duplex != DUPLEX_HALF &&
3131               cmd->base.duplex != DUPLEX_FULL)))
3132                 return -EINVAL;
3133
3134         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3135                                    false);
3136         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3137         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3138                                    false);
3139
3140         req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3141         req0->autoneg = cmd->base.autoneg;
3142         req0->speed = cpu_to_le32(cmd->base.speed);
3143         req0->duplex = cmd->base.duplex;
3144         ethtool_convert_link_mode_to_legacy_u32(&advertising,
3145                                                 cmd->link_modes.advertising);
3146         req0->advertising = cpu_to_le32(advertising);
3147         req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3148
3149         req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3150         req1->master_slave_cfg = cmd->base.master_slave_cfg;
3151
3152         ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3153         if (ret) {
3154                 dev_err(&hdev->pdev->dev,
3155                         "failed to set phy link ksettings, ret = %d.\n", ret);
3156                 return ret;
3157         }
3158
3159         hdev->hw.mac.autoneg = cmd->base.autoneg;
3160         hdev->hw.mac.speed = cmd->base.speed;
3161         hdev->hw.mac.duplex = cmd->base.duplex;
3162         linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3163
3164         return 0;
3165 }
3166
3167 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3168 {
3169         struct ethtool_link_ksettings cmd;
3170         int ret;
3171
3172         if (!hnae3_dev_phy_imp_supported(hdev))
3173                 return 0;
3174
3175         ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3176         if (ret)
3177                 return ret;
3178
3179         hdev->hw.mac.autoneg = cmd.base.autoneg;
3180         hdev->hw.mac.speed = cmd.base.speed;
3181         hdev->hw.mac.duplex = cmd.base.duplex;
3182
3183         return 0;
3184 }
3185
3186 static int hclge_tp_port_init(struct hclge_dev *hdev)
3187 {
3188         struct ethtool_link_ksettings cmd;
3189
3190         if (!hnae3_dev_phy_imp_supported(hdev))
3191                 return 0;
3192
3193         cmd.base.autoneg = hdev->hw.mac.autoneg;
3194         cmd.base.speed = hdev->hw.mac.speed;
3195         cmd.base.duplex = hdev->hw.mac.duplex;
3196         linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3197
3198         return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3199 }
3200
3201 static int hclge_update_port_info(struct hclge_dev *hdev)
3202 {
3203         struct hclge_mac *mac = &hdev->hw.mac;
3204         int speed = HCLGE_MAC_SPEED_UNKNOWN;
3205         int ret;
3206
3207         /* get the port info from SFP cmd if not copper port */
3208         if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3209                 return hclge_update_tp_port_info(hdev);
3210
3211         /* if the IMP does not support getting SFP/qSFP info, return directly */
3212         if (!hdev->support_sfp_query)
3213                 return 0;
3214
3215         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3216                 ret = hclge_get_sfp_info(hdev, mac);
3217         else
3218                 ret = hclge_get_sfp_speed(hdev, &speed);
3219
3220         if (ret == -EOPNOTSUPP) {
3221                 hdev->support_sfp_query = false;
3222                 return ret;
3223         } else if (ret) {
3224                 return ret;
3225         }
3226
3227         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3228                 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3229                         hclge_update_port_capability(hdev, mac);
3230                         return 0;
3231                 }
3232                 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3233                                                HCLGE_MAC_FULL);
3234         } else {
3235                 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3236                         return 0; /* do nothing if no SFP */
3237
3238                 /* must configure full duplex for SFP */
3239                 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3240         }
3241 }
3242
3243 static int hclge_get_status(struct hnae3_handle *handle)
3244 {
3245         struct hclge_vport *vport = hclge_get_vport(handle);
3246         struct hclge_dev *hdev = vport->back;
3247
3248         hclge_update_link_status(hdev);
3249
3250         return hdev->hw.mac.link;
3251 }
3252
3253 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3254 {
3255         if (!pci_num_vf(hdev->pdev)) {
3256                 dev_err(&hdev->pdev->dev,
3257                         "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3258                 return NULL;
3259         }
3260
3261         if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3262                 dev_err(&hdev->pdev->dev,
3263                         "vf id(%d) is out of range(0 <= vfid < %d)\n",
3264                         vf, pci_num_vf(hdev->pdev));
3265                 return NULL;
3266         }
3267
3268         /* VFs start from 1 in vport */
3269         vf += HCLGE_VF_VPORT_START_NUM;
3270         return &hdev->vport[vf];
3271 }
3272
3273 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3274                                struct ifla_vf_info *ivf)
3275 {
3276         struct hclge_vport *vport = hclge_get_vport(handle);
3277         struct hclge_dev *hdev = vport->back;
3278
3279         vport = hclge_get_vf_vport(hdev, vf);
3280         if (!vport)
3281                 return -EINVAL;
3282
3283         ivf->vf = vf;
3284         ivf->linkstate = vport->vf_info.link_state;
3285         ivf->spoofchk = vport->vf_info.spoofchk;
3286         ivf->trusted = vport->vf_info.trusted;
3287         ivf->min_tx_rate = 0;
3288         ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3289         ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3290         ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3291         ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3292         ether_addr_copy(ivf->mac, vport->vf_info.mac);
3293
3294         return 0;
3295 }
3296
3297 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3298                                    int link_state)
3299 {
3300         struct hclge_vport *vport = hclge_get_vport(handle);
3301         struct hclge_dev *hdev = vport->back;
3302         int link_state_old;
3303         int ret;
3304
3305         vport = hclge_get_vf_vport(hdev, vf);
3306         if (!vport)
3307                 return -EINVAL;
3308
3309         link_state_old = vport->vf_info.link_state;
3310         vport->vf_info.link_state = link_state;
3311
3312         ret = hclge_push_vf_link_status(vport);
3313         if (ret) {
3314                 vport->vf_info.link_state = link_state_old;
3315                 dev_err(&hdev->pdev->dev,
3316                         "failed to push vf%d link status, ret = %d\n", vf, ret);
3317         }
3318
3319         return ret;
3320 }
3321
3322 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3323 {
3324         u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3325
3326         /* fetch the events from their corresponding regs */
3327         cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3328         msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3329         hw_err_src_reg = hclge_read_dev(&hdev->hw,
3330                                         HCLGE_RAS_PF_OTHER_INT_STS_REG);
3331
3332         /* Assumption: if reset and mailbox events happen to be reported
3333          * together, we only process the reset event in this pass and defer
3334          * the processing of the mailbox events. Since we have not cleared
3335          * the RX CMDQ event this time, the hardware will raise another
3336          * interrupt just for the mailbox.
3337          *
3338          * check for vector0 reset event sources
3339          */
3340         if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3341                 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3342                 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3343                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3344                 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3345                 hdev->rst_stats.imp_rst_cnt++;
3346                 return HCLGE_VECTOR0_EVENT_RST;
3347         }
3348
3349         if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3350                 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3351                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3352                 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3353                 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3354                 hdev->rst_stats.global_rst_cnt++;
3355                 return HCLGE_VECTOR0_EVENT_RST;
3356         }
3357
3358         /* check for vector0 msix event and hardware error event source */
3359         if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3360             hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3361                 return HCLGE_VECTOR0_EVENT_ERR;
3362
3363         /* check for vector0 ptp event source */
3364         if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3365                 *clearval = msix_src_reg;
3366                 return HCLGE_VECTOR0_EVENT_PTP;
3367         }
3368
3369         /* check for vector0 mailbox(=CMDQ RX) event source */
3370         if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3371                 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3372                 *clearval = cmdq_src_reg;
3373                 return HCLGE_VECTOR0_EVENT_MBX;
3374         }
3375
3376         /* print other vector0 event source */
3377         dev_info(&hdev->pdev->dev,
3378                  "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3379                  cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3380
3381         return HCLGE_VECTOR0_EVENT_OTHER;
3382 }
3383
3384 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3385                                     u32 regclr)
3386 {
3387         switch (event_type) {
3388         case HCLGE_VECTOR0_EVENT_PTP:
3389         case HCLGE_VECTOR0_EVENT_RST:
3390                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3391                 break;
3392         case HCLGE_VECTOR0_EVENT_MBX:
3393                 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3394                 break;
3395         default:
3396                 break;
3397         }
3398 }
3399
3400 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3401 {
3402         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3403                                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3404                                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3405                                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3406         hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3407 }
3408
3409 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3410 {
3411         writel(enable ? 1 : 0, vector->addr);
3412 }
3413
3414 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3415 {
3416         struct hclge_dev *hdev = data;
3417         unsigned long flags;
3418         u32 clearval = 0;
3419         u32 event_cause;
3420
3421         hclge_enable_vector(&hdev->misc_vector, false);
3422         event_cause = hclge_check_event_cause(hdev, &clearval);
3423
3424         /* vector 0 interrupt is shared with reset and mailbox source events. */
3425         switch (event_cause) {
3426         case HCLGE_VECTOR0_EVENT_ERR:
3427                 hclge_errhand_task_schedule(hdev);
3428                 break;
3429         case HCLGE_VECTOR0_EVENT_RST:
3430                 hclge_reset_task_schedule(hdev);
3431                 break;
3432         case HCLGE_VECTOR0_EVENT_PTP:
3433                 spin_lock_irqsave(&hdev->ptp->lock, flags);
3434                 hclge_ptp_clean_tx_hwts(hdev);
3435                 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3436                 break;
3437         case HCLGE_VECTOR0_EVENT_MBX:
3438                 /* If we are here, then either:
3439                  * 1. we are not handling any mbx task and none is
3440                  *    scheduled either,
3441                  *                        OR
3442                  * 2. we are handling an mbx task but nothing more is
3443                  *    scheduled.
3444                  * In both cases we should schedule the mbx task, as there
3445                  * are more mbx messages reported by this interrupt.
3446                  */
3447                 hclge_mbx_task_schedule(hdev);
3448                 break;
3449         default:
3450                 dev_warn(&hdev->pdev->dev,
3451                          "received unknown or unhandled event of vector0\n");
3452                 break;
3453         }
3454
3455         hclge_clear_event_cause(hdev, event_cause, clearval);
3456
3457         /* Enable interrupt if it is not caused by reset event or error event */
3458         if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3459             event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3460             event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3461                 hclge_enable_vector(&hdev->misc_vector, true);
3462
3463         return IRQ_HANDLED;
3464 }
3465
3466 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3467 {
3468         if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3469                 dev_warn(&hdev->pdev->dev,
3470                          "vector(vector_id %d) has been freed.\n", vector_id);
3471                 return;
3472         }
3473
3474         hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3475         hdev->num_msi_left += 1;
3476         hdev->num_msi_used -= 1;
3477 }
3478
3479 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3480 {
3481         struct hclge_misc_vector *vector = &hdev->misc_vector;
3482
3483         vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3484
3485         vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3486         hdev->vector_status[0] = 0;
3487
3488         hdev->num_msi_left -= 1;
3489         hdev->num_msi_used += 1;
3490 }
3491
3492 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3493                                       const cpumask_t *mask)
3494 {
3495         struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3496                                               affinity_notify);
3497
3498         cpumask_copy(&hdev->affinity_mask, mask);
3499 }
3500
3501 static void hclge_irq_affinity_release(struct kref *ref)
3502 {
3503 }
3504
3505 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3506 {
3507         irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3508                               &hdev->affinity_mask);
3509
3510         hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3511         hdev->affinity_notify.release = hclge_irq_affinity_release;
3512         irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3513                                   &hdev->affinity_notify);
3514 }
3515
3516 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3517 {
3518         irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3519         irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3520 }
3521
3522 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3523 {
3524         int ret;
3525
3526         hclge_get_misc_vector(hdev);
3527
3528         /* the irq requested below is explicitly freed in hclge_misc_irq_uninit() */
3529         snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3530                  HCLGE_NAME, pci_name(hdev->pdev));
3531         ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3532                           0, hdev->misc_vector.name, hdev);
3533         if (ret) {
3534                 hclge_free_vector(hdev, 0);
3535                 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3536                         hdev->misc_vector.vector_irq);
3537         }
3538
3539         return ret;
3540 }
3541
3542 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3543 {
3544         free_irq(hdev->misc_vector.vector_irq, hdev);
3545         hclge_free_vector(hdev, 0);
3546 }
3547
3548 int hclge_notify_client(struct hclge_dev *hdev,
3549                         enum hnae3_reset_notify_type type)
3550 {
3551         struct hnae3_handle *handle = &hdev->vport[0].nic;
3552         struct hnae3_client *client = hdev->nic_client;
3553         int ret;
3554
3555         if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3556                 return 0;
3557
3558         if (!client->ops->reset_notify)
3559                 return -EOPNOTSUPP;
3560
3561         ret = client->ops->reset_notify(handle, type);
3562         if (ret)
3563                 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3564                         type, ret);
3565
3566         return ret;
3567 }
3568
3569 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3570                                     enum hnae3_reset_notify_type type)
3571 {
3572         struct hnae3_handle *handle = &hdev->vport[0].roce;
3573         struct hnae3_client *client = hdev->roce_client;
3574         int ret;
3575
3576         if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3577                 return 0;
3578
3579         if (!client->ops->reset_notify)
3580                 return -EOPNOTSUPP;
3581
3582         ret = client->ops->reset_notify(handle, type);
3583         if (ret)
3584                 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3585                         type, ret);
3586
3587         return ret;
3588 }
3589
3590 static int hclge_reset_wait(struct hclge_dev *hdev)
3591 {
3592 #define HCLGE_RESET_WAIT_MS     100
3593 #define HCLGE_RESET_WAIT_CNT    350
3594
3595         u32 val, reg, reg_bit;
3596         u32 cnt = 0;
3597
3598         switch (hdev->reset_type) {
3599         case HNAE3_IMP_RESET:
3600                 reg = HCLGE_GLOBAL_RESET_REG;
3601                 reg_bit = HCLGE_IMP_RESET_BIT;
3602                 break;
3603         case HNAE3_GLOBAL_RESET:
3604                 reg = HCLGE_GLOBAL_RESET_REG;
3605                 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3606                 break;
3607         case HNAE3_FUNC_RESET:
3608                 reg = HCLGE_FUN_RST_ING;
3609                 reg_bit = HCLGE_FUN_RST_ING_B;
3610                 break;
3611         default:
3612                 dev_err(&hdev->pdev->dev,
3613                         "Wait for unsupported reset type: %d\n",
3614                         hdev->reset_type);
3615                 return -EINVAL;
3616         }
3617
3618         val = hclge_read_dev(&hdev->hw, reg);
3619         while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3620                 msleep(HCLGE_RESET_WAIT_MS);
3621                 val = hclge_read_dev(&hdev->hw, reg);
3622                 cnt++;
3623         }
3624
3625         if (cnt >= HCLGE_RESET_WAIT_CNT) {
3626                 dev_warn(&hdev->pdev->dev,
3627                          "Wait for reset timeout: %d\n", hdev->reset_type);
3628                 return -EBUSY;
3629         }
3630
3631         return 0;
3632 }
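
/* Editor's sketch (user-space analogue, illustration only, not driver code):
 * the bounded polling loop used by hclge_reset_wait() above. It samples a
 * busy flag every 100 ms, at most HCLGE_RESET_WAIT_CNT (350) times, giving a
 * ceiling of roughly 35 seconds before giving up with -EBUSY.
 * read_reset_busy() is a stand-in for reading the reset status register.
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_INTERVAL_MS        100
#define POLL_MAX_CNT            350

static int busy_samples_left = 3;       /* pretend hw clears the bit after 3 reads */

static int read_reset_busy(void)
{
        return busy_samples_left-- > 0;
}

static int wait_reset_done(void)
{
        int cnt = 0;

        while (read_reset_busy() && cnt < POLL_MAX_CNT) {
                usleep(POLL_INTERVAL_MS * 1000);
                cnt++;
        }

        return cnt >= POLL_MAX_CNT ? -EBUSY : 0;
}

int main(void)
{
        printf("wait_reset_done() = %d\n", wait_reset_done());
        return 0;
}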
3633
3634 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3635 {
3636         struct hclge_vf_rst_cmd *req;
3637         struct hclge_desc desc;
3638
3639         req = (struct hclge_vf_rst_cmd *)desc.data;
3640         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3641         req->dest_vfid = func_id;
3642
3643         if (reset)
3644                 req->vf_rst = 0x1;
3645
3646         return hclge_cmd_send(&hdev->hw, &desc, 1);
3647 }
3648
3649 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3650 {
3651         int i;
3652
3653         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3654                 struct hclge_vport *vport = &hdev->vport[i];
3655                 int ret;
3656
3657                 /* Send cmd to set/clear VF's FUNC_RST_ING */
3658                 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3659                 if (ret) {
3660                         dev_err(&hdev->pdev->dev,
3661                                 "set vf(%u) rst failed %d!\n",
3662                                 vport->vport_id, ret);
3663                         return ret;
3664                 }
3665
3666                 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3667                         continue;
3668
3669                 /* Inform VF to process the reset.
3670                  * hclge_inform_reset_assert_to_vf may fail if VF
3671                  * driver is not loaded.
3672                  */
3673                 ret = hclge_inform_reset_assert_to_vf(vport);
3674                 if (ret)
3675                         dev_warn(&hdev->pdev->dev,
3676                                  "inform reset to vf(%u) failed %d!\n",
3677                                  vport->vport_id, ret);
3678         }
3679
3680         return 0;
3681 }
3682
3683 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3684 {
3685         if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3686             test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3687             test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3688                 return;
3689
3690         hclge_mbx_handler(hdev);
3691
3692         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3693 }
3694
3695 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3696 {
3697         struct hclge_pf_rst_sync_cmd *req;
3698         struct hclge_desc desc;
3699         int cnt = 0;
3700         int ret;
3701
3702         req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3703         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3704
3705         do {
3706                 /* the VF needs to bring its netdev down via mbx during a PF or FLR reset */
3707                 hclge_mailbox_service_task(hdev);
3708
3709                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3710                 /* for compatibility with old firmware, wait
3711                  * 100 ms for the VF to stop IO
3712                  */
3713                 if (ret == -EOPNOTSUPP) {
3714                         msleep(HCLGE_RESET_SYNC_TIME);
3715                         return;
3716                 } else if (ret) {
3717                         dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3718                                  ret);
3719                         return;
3720                 } else if (req->all_vf_ready) {
3721                         return;
3722                 }
3723                 msleep(HCLGE_PF_RESET_SYNC_TIME);
3724                 hclge_cmd_reuse_desc(&desc, true);
3725         } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3726
3727         dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3728 }
3729
3730 void hclge_report_hw_error(struct hclge_dev *hdev,
3731                            enum hnae3_hw_error_type type)
3732 {
3733         struct hnae3_client *client = hdev->nic_client;
3734
3735         if (!client || !client->ops->process_hw_error ||
3736             !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3737                 return;
3738
3739         client->ops->process_hw_error(&hdev->vport[0].nic, type);
3740 }
3741
3742 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3743 {
3744         u32 reg_val;
3745
3746         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3747         if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3748                 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3749                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3750                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3751         }
3752
3753         if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3754                 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3755                 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3756                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3757         }
3758 }
3759
3760 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3761 {
3762         struct hclge_desc desc;
3763         struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3764         int ret;
3765
3766         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3767         hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3768         req->fun_reset_vfid = func_id;
3769
3770         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3771         if (ret)
3772                 dev_err(&hdev->pdev->dev,
3773                         "send function reset cmd fail, status =%d\n", ret);
3774
3775         return ret;
3776 }
3777
3778 static void hclge_do_reset(struct hclge_dev *hdev)
3779 {
3780         struct hnae3_handle *handle = &hdev->vport[0].nic;
3781         struct pci_dev *pdev = hdev->pdev;
3782         u32 val;
3783
3784         if (hclge_get_hw_reset_stat(handle)) {
3785                 dev_info(&pdev->dev, "hardware reset not finish\n");
3786                 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3787                          hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3788                          hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3789                 return;
3790         }
3791
3792         switch (hdev->reset_type) {
3793         case HNAE3_IMP_RESET:
3794                 dev_info(&pdev->dev, "IMP reset requested\n");
3795                 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3796                 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3797                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3798                 break;
3799         case HNAE3_GLOBAL_RESET:
3800                 dev_info(&pdev->dev, "global reset requested\n");
3801                 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3802                 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3803                 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3804                 break;
3805         case HNAE3_FUNC_RESET:
3806                 dev_info(&pdev->dev, "PF reset requested\n");
3807                 /* schedule again to check later */
3808                 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3809                 hclge_reset_task_schedule(hdev);
3810                 break;
3811         default:
3812                 dev_warn(&pdev->dev,
3813                          "unsupported reset type: %d\n", hdev->reset_type);
3814                 break;
3815         }
3816 }
3817
3818 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3819                                                    unsigned long *addr)
3820 {
3821         enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3822         struct hclge_dev *hdev = ae_dev->priv;
3823
3824         /* return the highest priority reset level amongst all */
3825         if (test_bit(HNAE3_IMP_RESET, addr)) {
3826                 rst_level = HNAE3_IMP_RESET;
3827                 clear_bit(HNAE3_IMP_RESET, addr);
3828                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3829                 clear_bit(HNAE3_FUNC_RESET, addr);
3830         } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3831                 rst_level = HNAE3_GLOBAL_RESET;
3832                 clear_bit(HNAE3_GLOBAL_RESET, addr);
3833                 clear_bit(HNAE3_FUNC_RESET, addr);
3834         } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3835                 rst_level = HNAE3_FUNC_RESET;
3836                 clear_bit(HNAE3_FUNC_RESET, addr);
3837         } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3838                 rst_level = HNAE3_FLR_RESET;
3839                 clear_bit(HNAE3_FLR_RESET, addr);
3840         }
3841
3842         if (hdev->reset_type != HNAE3_NONE_RESET &&
3843             rst_level < hdev->reset_type)
3844                 return HNAE3_NONE_RESET;
3845
3846         return rst_level;
3847 }
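
/* Editor's sketch (stand-alone C, illustration only, not driver code): the
 * priority scheme of hclge_get_reset_level() above, IMP > GLOBAL > FUNC > FLR.
 * Picking a level also drops the lower-priority pending requests, since the
 * bigger reset already covers them. Plain bit masks stand in for the kernel
 * bitops; all names are made up for the sketch.
 */
#include <stdio.h>

enum rst_level { RST_NONE, RST_FLR, RST_FUNC, RST_GLOBAL, RST_IMP };

static enum rst_level pick_reset_level(unsigned long *pending)
{
        if (*pending & (1UL << RST_IMP)) {
                *pending &= ~((1UL << RST_IMP) | (1UL << RST_GLOBAL) |
                              (1UL << RST_FUNC));
                return RST_IMP;
        }
        if (*pending & (1UL << RST_GLOBAL)) {
                *pending &= ~((1UL << RST_GLOBAL) | (1UL << RST_FUNC));
                return RST_GLOBAL;
        }
        if (*pending & (1UL << RST_FUNC)) {
                *pending &= ~(1UL << RST_FUNC);
                return RST_FUNC;
        }
        if (*pending & (1UL << RST_FLR)) {
                *pending &= ~(1UL << RST_FLR);
                return RST_FLR;
        }
        return RST_NONE;
}

int main(void)
{
        unsigned long pending = (1UL << RST_FUNC) | (1UL << RST_GLOBAL);

        /* picks GLOBAL and clears the pending FUNC request as well */
        printf("level=%d, pending left=%#lx\n",
               pick_reset_level(&pending), pending);
        return 0;
}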
3848
3849 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3850 {
3851         u32 clearval = 0;
3852
3853         switch (hdev->reset_type) {
3854         case HNAE3_IMP_RESET:
3855                 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3856                 break;
3857         case HNAE3_GLOBAL_RESET:
3858                 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3859                 break;
3860         default:
3861                 break;
3862         }
3863
3864         if (!clearval)
3865                 return;
3866
3867         /* For revision 0x20, the reset interrupt source
3868          * can only be cleared after the hardware reset is done
3869          */
3870         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3871                 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3872                                 clearval);
3873
3874         hclge_enable_vector(&hdev->misc_vector, true);
3875 }
3876
3877 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3878 {
3879         u32 reg_val;
3880
3881         reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3882         if (enable)
3883                 reg_val |= HCLGE_NIC_SW_RST_RDY;
3884         else
3885                 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3886
3887         hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3888 }
3889
3890 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3891 {
3892         int ret;
3893
3894         ret = hclge_set_all_vf_rst(hdev, true);
3895         if (ret)
3896                 return ret;
3897
3898         hclge_func_reset_sync_vf(hdev);
3899
3900         return 0;
3901 }
3902
3903 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3904 {
3905         u32 reg_val;
3906         int ret = 0;
3907
3908         switch (hdev->reset_type) {
3909         case HNAE3_FUNC_RESET:
3910                 ret = hclge_func_reset_notify_vf(hdev);
3911                 if (ret)
3912                         return ret;
3913
3914                 ret = hclge_func_reset_cmd(hdev, 0);
3915                 if (ret) {
3916                         dev_err(&hdev->pdev->dev,
3917                                 "asserting function reset fail %d!\n", ret);
3918                         return ret;
3919                 }
3920
3921                 /* After performing the PF reset, it is not necessary to do
3922                  * mailbox handling or send any command to the firmware,
3923                  * because any mailbox handling or command to the firmware is
3924                  * only valid after hclge_cmd_init is called.
3925                  */
3926                 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3927                 hdev->rst_stats.pf_rst_cnt++;
3928                 break;
3929         case HNAE3_FLR_RESET:
3930                 ret = hclge_func_reset_notify_vf(hdev);
3931                 if (ret)
3932                         return ret;
3933                 break;
3934         case HNAE3_IMP_RESET:
3935                 hclge_handle_imp_error(hdev);
3936                 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3937                 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3938                                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3939                 break;
3940         default:
3941                 break;
3942         }
3943
3944         /* inform hardware that preparatory work is done */
3945         msleep(HCLGE_RESET_SYNC_TIME);
3946         hclge_reset_handshake(hdev, true);
3947         dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3948
3949         return ret;
3950 }
3951
3952 static void hclge_show_rst_info(struct hclge_dev *hdev)
3953 {
3954         char *buf;
3955
3956         buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3957         if (!buf)
3958                 return;
3959
3960         hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3961
3962         dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3963
3964         kfree(buf);
3965 }
3966
3967 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3968 {
3969 #define MAX_RESET_FAIL_CNT 5
3970
3971         if (hdev->reset_pending) {
3972                 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3973                          hdev->reset_pending);
3974                 return true;
3975         } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3976                    HCLGE_RESET_INT_M) {
3977                 dev_info(&hdev->pdev->dev,
3978                          "reset failed because new reset interrupt\n");
3979                 hclge_clear_reset_cause(hdev);
3980                 return false;
3981         } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3982                 hdev->rst_stats.reset_fail_cnt++;
3983                 set_bit(hdev->reset_type, &hdev->reset_pending);
3984                 dev_info(&hdev->pdev->dev,
3985                          "re-schedule reset task(%u)\n",
3986                          hdev->rst_stats.reset_fail_cnt);
3987                 return true;
3988         }
3989
3990         hclge_clear_reset_cause(hdev);
3991
3992         /* recover the handshake status when the reset fails */
3993         hclge_reset_handshake(hdev, true);
3994
3995         dev_err(&hdev->pdev->dev, "Reset fail!\n");
3996
3997         hclge_show_rst_info(hdev);
3998
3999         set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4000
4001         return false;
4002 }
4003
4004 static void hclge_update_reset_level(struct hclge_dev *hdev)
4005 {
4006         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4007         enum hnae3_reset_type reset_level;
4008
4009         /* reset requests will not be set during reset, so clear the
4010          * pending reset request to avoid an unnecessary reset
4011          * caused by the same reason.
4012          */
4013         hclge_get_reset_level(ae_dev, &hdev->reset_request);
4014
4015         /* if default_reset_request holds a higher level reset request,
4016          * it should be handled as soon as possible, since some errors
4017          * need this kind of reset to be fixed.
4018          */
4019         reset_level = hclge_get_reset_level(ae_dev,
4020                                             &hdev->default_reset_request);
4021         if (reset_level != HNAE3_NONE_RESET)
4022                 set_bit(reset_level, &hdev->reset_request);
4023 }
4024
4025 static int hclge_set_rst_done(struct hclge_dev *hdev)
4026 {
4027         struct hclge_pf_rst_done_cmd *req;
4028         struct hclge_desc desc;
4029         int ret;
4030
4031         req = (struct hclge_pf_rst_done_cmd *)desc.data;
4032         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4033         req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4034
4035         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4036         /* To be compatible with the old firmware, which does not support
4037          * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4038          * return success
4039          */
4040         if (ret == -EOPNOTSUPP) {
4041                 dev_warn(&hdev->pdev->dev,
4042                          "current firmware does not support command(0x%x)!\n",
4043                          HCLGE_OPC_PF_RST_DONE);
4044                 return 0;
4045         } else if (ret) {
4046                 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4047                         ret);
4048         }
4049
4050         return ret;
4051 }
4052
4053 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4054 {
4055         int ret = 0;
4056
4057         switch (hdev->reset_type) {
4058         case HNAE3_FUNC_RESET:
4059         case HNAE3_FLR_RESET:
4060                 ret = hclge_set_all_vf_rst(hdev, false);
4061                 break;
4062         case HNAE3_GLOBAL_RESET:
4063         case HNAE3_IMP_RESET:
4064                 ret = hclge_set_rst_done(hdev);
4065                 break;
4066         default:
4067                 break;
4068         }
4069
4070         /* clear the handshake status after re-initialization is done */
4071         hclge_reset_handshake(hdev, false);
4072
4073         return ret;
4074 }
4075
4076 static int hclge_reset_stack(struct hclge_dev *hdev)
4077 {
4078         int ret;
4079
4080         ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4081         if (ret)
4082                 return ret;
4083
4084         ret = hclge_reset_ae_dev(hdev->ae_dev);
4085         if (ret)
4086                 return ret;
4087
4088         return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4089 }
4090
4091 static int hclge_reset_prepare(struct hclge_dev *hdev)
4092 {
4093         int ret;
4094
4095         hdev->rst_stats.reset_cnt++;
4096         /* perform reset of the stack & ae device for a client */
4097         ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4098         if (ret)
4099                 return ret;
4100
4101         rtnl_lock();
4102         ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4103         rtnl_unlock();
4104         if (ret)
4105                 return ret;
4106
4107         return hclge_reset_prepare_wait(hdev);
4108 }
4109
4110 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4111 {
4112         int ret;
4113
4114         hdev->rst_stats.hw_reset_done_cnt++;
4115
4116         ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4117         if (ret)
4118                 return ret;
4119
4120         rtnl_lock();
4121         ret = hclge_reset_stack(hdev);
4122         rtnl_unlock();
4123         if (ret)
4124                 return ret;
4125
4126         hclge_clear_reset_cause(hdev);
4127
4128         ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4129         /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4130          * times
4131          */
4132         if (ret &&
4133             hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4134                 return ret;
4135
4136         ret = hclge_reset_prepare_up(hdev);
4137         if (ret)
4138                 return ret;
4139
4140         rtnl_lock();
4141         ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4142         rtnl_unlock();
4143         if (ret)
4144                 return ret;
4145
4146         ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4147         if (ret)
4148                 return ret;
4149
4150         hdev->last_reset_time = jiffies;
4151         hdev->rst_stats.reset_fail_cnt = 0;
4152         hdev->rst_stats.reset_done_cnt++;
4153         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4154
4155         hclge_update_reset_level(hdev);
4156
4157         return 0;
4158 }
4159
4160 static void hclge_reset(struct hclge_dev *hdev)
4161 {
4162         if (hclge_reset_prepare(hdev))
4163                 goto err_reset;
4164
4165         if (hclge_reset_wait(hdev))
4166                 goto err_reset;
4167
4168         if (hclge_reset_rebuild(hdev))
4169                 goto err_reset;
4170
4171         return;
4172
4173 err_reset:
4174         if (hclge_reset_err_handle(hdev))
4175                 hclge_reset_task_schedule(hdev);
4176 }
4177
4178 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4179 {
4180         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4181         struct hclge_dev *hdev = ae_dev->priv;
4182
4183         /* We might end up getting called broadly because of the two cases below:
4184          * 1. A recoverable error was conveyed through APEI and the only way to
4185          *    restore normalcy is to reset.
4186          * 2. A new reset request from the stack due to timeout
4187          *
4188          * Check whether this is a new reset request and we are not here just
4189          * because the last reset attempt did not succeed and the watchdog hit
4190          * us again. We know it is a new request if the last one did not occur
4191          * very recently (watchdog timer = 5*HZ, so check after a sufficiently
4192          * large time, say 4*5*HZ). For a new request we reset the "reset level"
4193          * to PF reset. If it is a repeat of the most recent request, we want to
4194          * make sure we throttle it, so we will not allow it again before 3*HZ
4195          * has elapsed.
4196          */
4197
4198         if (time_before(jiffies, (hdev->last_reset_time +
4199                                   HCLGE_RESET_INTERVAL))) {
4200                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4201                 return;
4202         }
4203
4204         if (hdev->default_reset_request) {
4205                 hdev->reset_level =
4206                         hclge_get_reset_level(ae_dev,
4207                                               &hdev->default_reset_request);
4208         } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4209                 hdev->reset_level = HNAE3_FUNC_RESET;
4210         }
4211
4212         dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4213                  hdev->reset_level);
4214
4215         /* request reset & schedule reset task */
4216         set_bit(hdev->reset_level, &hdev->reset_request);
4217         hclge_reset_task_schedule(hdev);
4218
4219         if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4220                 hdev->reset_level++;
4221 }
4222
4223 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4224                                         enum hnae3_reset_type rst_type)
4225 {
4226         struct hclge_dev *hdev = ae_dev->priv;
4227
4228         set_bit(rst_type, &hdev->default_reset_request);
4229 }
4230
4231 static void hclge_reset_timer(struct timer_list *t)
4232 {
4233         struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4234
4235         /* if default_reset_request has no value, it means that this reset
4236          * request has already been handled, so just return here
4237          */
4238         if (!hdev->default_reset_request)
4239                 return;
4240
4241         dev_info(&hdev->pdev->dev,
4242                  "triggering reset in reset timer\n");
4243         hclge_reset_event(hdev->pdev, NULL);
4244 }
4245
4246 static void hclge_reset_subtask(struct hclge_dev *hdev)
4247 {
4248         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4249
4250         /* check if there is any ongoing reset in the hardware. This status can
4251          * be checked from reset_pending. If there is, we need to wait for the
4252          * hardware to complete the reset.
4253          *    a. If we are able to figure out in reasonable time that the
4254          *       hardware has fully reset, we can proceed with the driver and
4255          *       client reset.
4256          *    b. Otherwise, we can come back later to check this status, so
4257          *       reschedule now.
4258          */
4259         hdev->last_reset_time = jiffies;
4260         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4261         if (hdev->reset_type != HNAE3_NONE_RESET)
4262                 hclge_reset(hdev);
4263
4264         /* check if we got any *new* reset requests to be honored */
4265         hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4266         if (hdev->reset_type != HNAE3_NONE_RESET)
4267                 hclge_do_reset(hdev);
4268
4269         hdev->reset_type = HNAE3_NONE_RESET;
4270 }
4271
4272 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4273 {
4274         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4275         enum hnae3_reset_type reset_type;
4276
4277         if (ae_dev->hw_err_reset_req) {
4278                 reset_type = hclge_get_reset_level(ae_dev,
4279                                                    &ae_dev->hw_err_reset_req);
4280                 hclge_set_def_reset_request(ae_dev, reset_type);
4281         }
4282
4283         if (hdev->default_reset_request && ae_dev->ops->reset_event)
4284                 ae_dev->ops->reset_event(hdev->pdev, NULL);
4285
4286         /* enable interrupt after error handling is complete */
4287         hclge_enable_vector(&hdev->misc_vector, true);
4288 }
4289
4290 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4291 {
4292         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4293
4294         ae_dev->hw_err_reset_req = 0;
4295
4296         if (hclge_find_error_source(hdev)) {
4297                 hclge_handle_error_info_log(ae_dev);
4298                 hclge_handle_mac_tnl(hdev);
4299         }
4300
4301         hclge_handle_err_reset_request(hdev);
4302 }
4303
4304 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4305 {
4306         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4307         struct device *dev = &hdev->pdev->dev;
4308         u32 msix_sts_reg;
4309
4310         msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4311         if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4312                 if (hclge_handle_hw_msix_error
4313                                 (hdev, &hdev->default_reset_request))
4314                         dev_info(dev, "received msix interrupt 0x%x\n",
4315                                  msix_sts_reg);
4316         }
4317
4318         hclge_handle_hw_ras_error(ae_dev);
4319
4320         hclge_handle_err_reset_request(hdev);
4321 }
4322
4323 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4324 {
4325         if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4326                 return;
4327
4328         if (hnae3_dev_ras_imp_supported(hdev))
4329                 hclge_handle_err_recovery(hdev);
4330         else
4331                 hclge_misc_err_recovery(hdev);
4332 }
4333
4334 static void hclge_reset_service_task(struct hclge_dev *hdev)
4335 {
4336         if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4337                 return;
4338
4339         down(&hdev->reset_sem);
4340         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4341
4342         hclge_reset_subtask(hdev);
4343
4344         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4345         up(&hdev->reset_sem);
4346 }
4347
4348 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4349 {
4350         int i;
4351
4352         /* start from vport 1 since the PF is always alive */
4353         for (i = 1; i < hdev->num_alloc_vport; i++) {
4354                 struct hclge_vport *vport = &hdev->vport[i];
4355
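                /* consider the VF dead if it has not reported itself alive
                 * within the last 8 seconds
                 */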
4356                 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4357                         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4358
4359                 /* If the VF is not alive, set mps to its default value */
4360                 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4361                         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4362         }
4363 }
4364
4365 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4366 {
4367         unsigned long delta = round_jiffies_relative(HZ);
4368
4369         if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4370                 return;
4371
4372         /* Always handle the link update to make sure the link state is
4373          * refreshed when the update is triggered by mbx.
4374          */
4375         hclge_update_link_status(hdev);
4376         hclge_sync_mac_table(hdev);
4377         hclge_sync_promisc_mode(hdev);
4378         hclge_sync_fd_table(hdev);
4379
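        /* if the last service run finished less than a second ago, skip this
         * round and reschedule for the remaining part of the interval
         */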
4380         if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4381                 delta = jiffies - hdev->last_serv_processed;
4382
4383                 if (delta < round_jiffies_relative(HZ)) {
4384                         delta = round_jiffies_relative(HZ) - delta;
4385                         goto out;
4386                 }
4387         }
4388
4389         hdev->serv_processed_cnt++;
4390         hclge_update_vport_alive(hdev);
4391
4392         if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4393                 hdev->last_serv_processed = jiffies;
4394                 goto out;
4395         }
4396
4397         if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4398                 hclge_update_stats_for_all(hdev);
4399
4400         hclge_update_port_info(hdev);
4401         hclge_sync_vlan_filter(hdev);
4402
4403         if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4404                 hclge_rfs_filter_expire(hdev);
4405
4406         hdev->last_serv_processed = jiffies;
4407
4408 out:
4409         hclge_task_schedule(hdev, delta);
4410 }
4411
4412 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4413 {
4414         unsigned long flags;
4415
4416         if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4417             !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4418             !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4419                 return;
4420
4421         /* to avoid racing with the irq handler */
4422         spin_lock_irqsave(&hdev->ptp->lock, flags);
4423
4424         /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4425          * handler may handle it just before spin_lock_irqsave().
4426          */
4427         if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4428                 hclge_ptp_clean_tx_hwts(hdev);
4429
4430         spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4431 }
4432
4433 static void hclge_service_task(struct work_struct *work)
4434 {
4435         struct hclge_dev *hdev =
4436                 container_of(work, struct hclge_dev, service_task.work);
4437
4438         hclge_errhand_service_task(hdev);
4439         hclge_reset_service_task(hdev);
4440         hclge_ptp_service_task(hdev);
4441         hclge_mailbox_service_task(hdev);
4442         hclge_periodic_service_task(hdev);
4443
4444         /* Handle error recovery, reset and mbx again in case the periodic
4445          * task delays the handling by calling hclge_task_schedule() in
4446          * hclge_periodic_service_task().
4447          */
4448         hclge_errhand_service_task(hdev);
4449         hclge_reset_service_task(hdev);
4450         hclge_mailbox_service_task(hdev);
4451 }
4452
4453 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4454 {
4455         /* VF handle has no client */
4456         if (!handle->client)
4457                 return container_of(handle, struct hclge_vport, nic);
4458         else if (handle->client->type == HNAE3_CLIENT_ROCE)
4459                 return container_of(handle, struct hclge_vport, roce);
4460         else
4461                 return container_of(handle, struct hclge_vport, nic);
4462 }
4463
4464 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4465                                   struct hnae3_vector_info *vector_info)
4466 {
4467 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4468
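        /* map the device vector index to its Linux irq number and work out
         * the per-vector interrupt control register address; the bookkeeping
         * at the end marks the vector as owned by vport 0
         */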
4469         vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4470
4471         /* need an extended offset to configure vectors >= 64 */
4472         if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4473                 vector_info->io_addr = hdev->hw.io_base +
4474                                 HCLGE_VECTOR_REG_BASE +
4475                                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4476         else
4477                 vector_info->io_addr = hdev->hw.io_base +
4478                                 HCLGE_VECTOR_EXT_REG_BASE +
4479                                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4480                                 HCLGE_VECTOR_REG_OFFSET_H +
4481                                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4482                                 HCLGE_VECTOR_REG_OFFSET;
4483
4484         hdev->vector_status[idx] = hdev->vport[0].vport_id;
4485         hdev->vector_irq[idx] = vector_info->vector;
4486 }
4487
4488 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4489                             struct hnae3_vector_info *vector_info)
4490 {
4491         struct hclge_vport *vport = hclge_get_vport(handle);
4492         struct hnae3_vector_info *vector = vector_info;
4493         struct hclge_dev *hdev = vport->back;
4494         int alloc = 0;
4495         u16 i = 0;
4496         u16 j;
4497
4498         vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4499         vector_num = min(hdev->num_msi_left, vector_num);
4500
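        /* scan the vector table for free slots (marked HCLGE_INVALID_VPORT)
         * and hand out at most vector_num of them; the return value is the
         * number of vectors actually allocated
         */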
4501         for (j = 0; j < vector_num; j++) {
4502                 while (++i < hdev->num_nic_msi) {
4503                         if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4504                                 hclge_get_vector_info(hdev, i, vector);
4505                                 vector++;
4506                                 alloc++;
4507
4508                                 break;
4509                         }
4510                 }
4511         }
4512         hdev->num_msi_left -= alloc;
4513         hdev->num_msi_used += alloc;
4514
4515         return alloc;
4516 }
4517
4518 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4519 {
4520         int i;
4521
4522         for (i = 0; i < hdev->num_msi; i++)
4523                 if (vector == hdev->vector_irq[i])
4524                         return i;
4525
4526         return -EINVAL;
4527 }
4528
4529 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4530 {
4531         struct hclge_vport *vport = hclge_get_vport(handle);
4532         struct hclge_dev *hdev = vport->back;
4533         int vector_id;
4534
4535         vector_id = hclge_get_vector_index(hdev, vector);
4536         if (vector_id < 0) {
4537                 dev_err(&hdev->pdev->dev,
4538                         "Get vector index fail. vector = %d\n", vector);
4539                 return vector_id;
4540         }
4541
4542         hclge_free_vector(hdev, vector_id);
4543
4544         return 0;
4545 }
4546
4547 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4548 {
4549         return HCLGE_RSS_KEY_SIZE;
4550 }
4551
4552 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4553                                   const u8 hfunc, const u8 *key)
4554 {
4555         struct hclge_rss_config_cmd *req;
4556         unsigned int key_offset = 0;
4557         struct hclge_desc desc;
4558         int key_counts;
4559         int key_size;
4560         int ret;
4561
4562         key_counts = HCLGE_RSS_KEY_SIZE;
4563         req = (struct hclge_rss_config_cmd *)desc.data;
4564
4565         while (key_counts) {
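        /* the hash key does not fit into a single descriptor, so write it in
         * HCLGE_RSS_HASH_KEY_NUM byte chunks, encoding the chunk index into
         * the hash_config field alongside the hash algorithm
         */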
4566                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4567                                            false);
4568
4569                 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4570                 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4571
4572                 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4573                 memcpy(req->hash_key,
4574                        key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4575
4576                 key_counts -= key_size;
4577                 key_offset++;
4578                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4579                 if (ret) {
4580                         dev_err(&hdev->pdev->dev,
4581                                 "Configure RSS config fail, status = %d\n",
4582                                 ret);
4583                         return ret;
4584                 }
4585         }
4586         return 0;
4587 }
4588
4589 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4590 {
4591         struct hclge_rss_indirection_table_cmd *req;
4592         struct hclge_desc desc;
4593         int rss_cfg_tbl_num;
4594         u8 rss_msb_oft;
4595         u8 rss_msb_val;
4596         int ret;
4597         u16 qid;
4598         int i;
4599         u32 j;
4600
4601         req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4602         rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4603                           HCLGE_RSS_CFG_TBL_SIZE;
4604
4605         for (i = 0; i < rss_cfg_tbl_num; i++) {
4606                 hclge_cmd_setup_basic_desc
4607                         (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4608
4609                 req->start_table_index =
4610                         cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4611                 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
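                /* the low byte of each queue id goes into rss_qid_l, and the
                 * bit above it is packed into the rss_qid_h bitmap
                 */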
4612                 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4613                         qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4614                         req->rss_qid_l[j] = qid & 0xff;
4615                         rss_msb_oft =
4616                                 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4617                         rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4618                                 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4619                         req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4620                 }
4621                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4622                 if (ret) {
4623                         dev_err(&hdev->pdev->dev,
4624                                 "Configure rss indir table fail,status = %d\n",
4625                                 ret);
4626                         return ret;
4627                 }
4628         }
4629         return 0;
4630 }
4631
4632 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4633                                  u16 *tc_size, u16 *tc_offset)
4634 {
4635         struct hclge_rss_tc_mode_cmd *req;
4636         struct hclge_desc desc;
4637         int ret;
4638         int i;
4639
4640         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4641         req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4642
4643         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4644                 u16 mode = 0;
4645
4646                 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4647                 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4648                                 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4649                 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4650                               tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4651                 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4652                                 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4653
4654                 req->rss_tc_mode[i] = cpu_to_le16(mode);
4655         }
4656
4657         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4658         if (ret)
4659                 dev_err(&hdev->pdev->dev,
4660                         "Configure rss tc mode fail, status = %d\n", ret);
4661
4662         return ret;
4663 }
4664
4665 static void hclge_get_rss_type(struct hclge_vport *vport)
4666 {
4667         if (vport->rss_tuple_sets.ipv4_tcp_en ||
4668             vport->rss_tuple_sets.ipv4_udp_en ||
4669             vport->rss_tuple_sets.ipv4_sctp_en ||
4670             vport->rss_tuple_sets.ipv6_tcp_en ||
4671             vport->rss_tuple_sets.ipv6_udp_en ||
4672             vport->rss_tuple_sets.ipv6_sctp_en)
4673                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4674         else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4675                  vport->rss_tuple_sets.ipv6_fragment_en)
4676                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4677         else
4678                 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4679 }
4680
4681 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4682 {
4683         struct hclge_rss_input_tuple_cmd *req;
4684         struct hclge_desc desc;
4685         int ret;
4686
4687         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4688
4689         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4690
4691         /* Get the tuple cfg from pf */
4692         req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4693         req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4694         req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4695         req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4696         req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4697         req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4698         req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4699         req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4700         hclge_get_rss_type(&hdev->vport[0]);
4701         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4702         if (ret)
4703                 dev_err(&hdev->pdev->dev,
4704                         "Configure rss input fail, status = %d\n", ret);
4705         return ret;
4706 }
4707
4708 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4709                          u8 *key, u8 *hfunc)
4710 {
4711         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4712         struct hclge_vport *vport = hclge_get_vport(handle);
4713         int i;
4714
4715         /* Get hash algorithm */
4716         if (hfunc) {
4717                 switch (vport->rss_algo) {
4718                 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4719                         *hfunc = ETH_RSS_HASH_TOP;
4720                         break;
4721                 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4722                         *hfunc = ETH_RSS_HASH_XOR;
4723                         break;
4724                 default:
4725                         *hfunc = ETH_RSS_HASH_UNKNOWN;
4726                         break;
4727                 }
4728         }
4729
4730         /* Get the RSS Key required by the user */
4731         if (key)
4732                 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4733
4734         /* Get the indirection table */
4735         if (indir)
4736                 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4737                         indir[i] =  vport->rss_indirection_tbl[i];
4738
4739         return 0;
4740 }
4741
4742 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4743                          const  u8 *key, const  u8 hfunc)
4744 {
4745         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4746         struct hclge_vport *vport = hclge_get_vport(handle);
4747         struct hclge_dev *hdev = vport->back;
4748         u8 hash_algo;
4749         int ret, i;
4750
4751         /* Set the RSS Hash Key if specified by the user */
4752         if (key) {
4753                 switch (hfunc) {
4754                 case ETH_RSS_HASH_TOP:
4755                         hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4756                         break;
4757                 case ETH_RSS_HASH_XOR:
4758                         hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4759                         break;
4760                 case ETH_RSS_HASH_NO_CHANGE:
4761                         hash_algo = vport->rss_algo;
4762                         break;
4763                 default:
4764                         return -EINVAL;
4765                 }
4766
4767                 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4768                 if (ret)
4769                         return ret;
4770
4771                 /* Update the shadow RSS key with the user specified key */
4772                 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4773                 vport->rss_algo = hash_algo;
4774         }
4775
4776         /* Update the shadow RSS table with user specified qids */
4777         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4778                 vport->rss_indirection_tbl[i] = indir[i];
4779
4780         /* Update the hardware */
4781         return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
4782 }
4783
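/* translate the ethtool RXH_* hash flags for one flow type into the
 * driver's per-tuple hash bits used by the RSS input tuple command
 */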
4784 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4785 {
4786         u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4787
4788         if (nfc->data & RXH_L4_B_2_3)
4789                 hash_sets |= HCLGE_D_PORT_BIT;
4790         else
4791                 hash_sets &= ~HCLGE_D_PORT_BIT;
4792
4793         if (nfc->data & RXH_IP_SRC)
4794                 hash_sets |= HCLGE_S_IP_BIT;
4795         else
4796                 hash_sets &= ~HCLGE_S_IP_BIT;
4797
4798         if (nfc->data & RXH_IP_DST)
4799                 hash_sets |= HCLGE_D_IP_BIT;
4800         else
4801                 hash_sets &= ~HCLGE_D_IP_BIT;
4802
4803         if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4804                 hash_sets |= HCLGE_V_TAG_BIT;
4805
4806         return hash_sets;
4807 }
4808
4809 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4810                                     struct ethtool_rxnfc *nfc,
4811                                     struct hclge_rss_input_tuple_cmd *req)
4812 {
4813         struct hclge_dev *hdev = vport->back;
4814         u8 tuple_sets;
4815
4816         req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4817         req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4818         req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4819         req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4820         req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4821         req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4822         req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4823         req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4824
4825         tuple_sets = hclge_get_rss_hash_bits(nfc);
4826         switch (nfc->flow_type) {
4827         case TCP_V4_FLOW:
4828                 req->ipv4_tcp_en = tuple_sets;
4829                 break;
4830         case TCP_V6_FLOW:
4831                 req->ipv6_tcp_en = tuple_sets;
4832                 break;
4833         case UDP_V4_FLOW:
4834                 req->ipv4_udp_en = tuple_sets;
4835                 break;
4836         case UDP_V6_FLOW:
4837                 req->ipv6_udp_en = tuple_sets;
4838                 break;
4839         case SCTP_V4_FLOW:
4840                 req->ipv4_sctp_en = tuple_sets;
4841                 break;
4842         case SCTP_V6_FLOW:
4843                 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4844                     (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4845                         return -EINVAL;
4846
4847                 req->ipv6_sctp_en = tuple_sets;
4848                 break;
4849         case IPV4_FLOW:
4850                 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4851                 break;
4852         case IPV6_FLOW:
4853                 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4854                 break;
4855         default:
4856                 return -EINVAL;
4857         }
4858
4859         return 0;
4860 }
4861
4862 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4863                                struct ethtool_rxnfc *nfc)
4864 {
4865         struct hclge_vport *vport = hclge_get_vport(handle);
4866         struct hclge_dev *hdev = vport->back;
4867         struct hclge_rss_input_tuple_cmd *req;
4868         struct hclge_desc desc;
4869         int ret;
4870
4871         if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4872                           RXH_L4_B_0_1 | RXH_L4_B_2_3))
4873                 return -EINVAL;
4874
4875         req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4876         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4877
4878         ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4879         if (ret) {
4880                 dev_err(&hdev->pdev->dev,
4881                         "failed to init rss tuple cmd, ret = %d\n", ret);
4882                 return ret;
4883         }
4884
4885         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4886         if (ret) {
4887                 dev_err(&hdev->pdev->dev,
4888                         "Set rss tuple fail, status = %d\n", ret);
4889                 return ret;
4890         }
4891
4892         vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4893         vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4894         vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4895         vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4896         vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4897         vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4898         vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4899         vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4900         hclge_get_rss_type(vport);
4901         return 0;
4902 }
4903
4904 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4905                                      u8 *tuple_sets)
4906 {
4907         switch (flow_type) {
4908         case TCP_V4_FLOW:
4909                 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4910                 break;
4911         case UDP_V4_FLOW:
4912                 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4913                 break;
4914         case TCP_V6_FLOW:
4915                 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4916                 break;
4917         case UDP_V6_FLOW:
4918                 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4919                 break;
4920         case SCTP_V4_FLOW:
4921                 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4922                 break;
4923         case SCTP_V6_FLOW:
4924                 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4925                 break;
4926         case IPV4_FLOW:
4927         case IPV6_FLOW:
4928                 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4929                 break;
4930         default:
4931                 return -EINVAL;
4932         }
4933
4934         return 0;
4935 }
4936
4937 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4938 {
4939         u64 tuple_data = 0;
4940
4941         if (tuple_sets & HCLGE_D_PORT_BIT)
4942                 tuple_data |= RXH_L4_B_2_3;
4943         if (tuple_sets & HCLGE_S_PORT_BIT)
4944                 tuple_data |= RXH_L4_B_0_1;
4945         if (tuple_sets & HCLGE_D_IP_BIT)
4946                 tuple_data |= RXH_IP_DST;
4947         if (tuple_sets & HCLGE_S_IP_BIT)
4948                 tuple_data |= RXH_IP_SRC;
4949
4950         return tuple_data;
4951 }
4952
4953 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4954                                struct ethtool_rxnfc *nfc)
4955 {
4956         struct hclge_vport *vport = hclge_get_vport(handle);
4957         u8 tuple_sets;
4958         int ret;
4959
4960         nfc->data = 0;
4961
4962         ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4963         if (ret || !tuple_sets)
4964                 return ret;
4965
4966         nfc->data = hclge_convert_rss_tuple(tuple_sets);
4967
4968         return 0;
4969 }
4970
4971 static int hclge_get_tc_size(struct hnae3_handle *handle)
4972 {
4973         struct hclge_vport *vport = hclge_get_vport(handle);
4974         struct hclge_dev *hdev = vport->back;
4975
4976         return hdev->pf_rss_size_max;
4977 }
4978
4979 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4980 {
4981         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4982         struct hclge_vport *vport = hdev->vport;
4983         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4984         u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4985         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4986         struct hnae3_tc_info *tc_info;
4987         u16 roundup_size;
4988         u16 rss_size;
4989         int i;
4990
4991         tc_info = &vport->nic.kinfo.tc_info;
4992         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4993                 rss_size = tc_info->tqp_count[i];
4994                 tc_valid[i] = 0;
4995
4996                 if (!(hdev->hw_tc_map & BIT(i)))
4997                         continue;
4998
4999                 /* tc_size set to hardware is the log2 of the roundup power
5000                  * of two of rss_size; the actual queue size is limited by
5001                  * the indirection table.
5002                  */
5003                 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5004                     rss_size == 0) {
5005                         dev_err(&hdev->pdev->dev,
5006                                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5007                                 rss_size);
5008                         return -EINVAL;
5009                 }
5010
5011                 roundup_size = roundup_pow_of_two(rss_size);
5012                 roundup_size = ilog2(roundup_size);
5013
5014                 tc_valid[i] = 1;
5015                 tc_size[i] = roundup_size;
5016                 tc_offset[i] = tc_info->tqp_offset[i];
5017         }
5018
5019         return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
5020 }
5021
5022 int hclge_rss_init_hw(struct hclge_dev *hdev)
5023 {
5024         struct hclge_vport *vport = hdev->vport;
5025         u16 *rss_indir = vport[0].rss_indirection_tbl;
5026         u8 *key = vport[0].rss_hash_key;
5027         u8 hfunc = vport[0].rss_algo;
5028         int ret;
5029
5030         ret = hclge_set_rss_indir_table(hdev, rss_indir);
5031         if (ret)
5032                 return ret;
5033
5034         ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5035         if (ret)
5036                 return ret;
5037
5038         ret = hclge_set_rss_input_tuple(hdev);
5039         if (ret)
5040                 return ret;
5041
5042         return hclge_init_rss_tc_mode(hdev);
5043 }
5044
5045 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5046 {
5047         struct hclge_vport *vport = &hdev->vport[0];
5048         int i;
5049
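        /* default mapping: spread the indirection table entries round-robin
         * across the allocated RSS queues
         */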
5050         for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5051                 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5052 }
5053
5054 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5055 {
5056         u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5057         int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5058         struct hclge_vport *vport = &hdev->vport[0];
5059         u16 *rss_ind_tbl;
5060
5061         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5062                 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5063
5064         vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5065         vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5066         vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5067         vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5068         vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5069         vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5070         vport->rss_tuple_sets.ipv6_sctp_en =
5071                 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5072                 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5073                 HCLGE_RSS_INPUT_TUPLE_SCTP;
5074         vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5075
5076         vport->rss_algo = rss_algo;
5077
5078         rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5079                                    sizeof(*rss_ind_tbl), GFP_KERNEL);
5080         if (!rss_ind_tbl)
5081                 return -ENOMEM;
5082
5083         vport->rss_indirection_tbl = rss_ind_tbl;
5084         memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5085
5086         hclge_rss_indir_init_cfg(hdev);
5087
5088         return 0;
5089 }
5090
5091 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5092                                 int vector_id, bool en,
5093                                 struct hnae3_ring_chain_node *ring_chain)
5094 {
5095         struct hclge_dev *hdev = vport->back;
5096         struct hnae3_ring_chain_node *node;
5097         struct hclge_desc desc;
5098         struct hclge_ctrl_vector_chain_cmd *req =
5099                 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5100         enum hclge_cmd_status status;
5101         enum hclge_opcode_type op;
5102         u16 tqp_type_and_id;
5103         int i;
5104
5105         op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5106         hclge_cmd_setup_basic_desc(&desc, op, false);
5107         req->int_vector_id_l = hnae3_get_field(vector_id,
5108                                                HCLGE_VECTOR_ID_L_M,
5109                                                HCLGE_VECTOR_ID_L_S);
5110         req->int_vector_id_h = hnae3_get_field(vector_id,
5111                                                HCLGE_VECTOR_ID_H_M,
5112                                                HCLGE_VECTOR_ID_H_S);
5113
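        /* walk the ring chain, packing up to HCLGE_VECTOR_ELEMENTS_PER_CMD
         * ring-to-vector mappings into each descriptor; when a descriptor
         * fills up, send it and start a fresh one for the same vector
         */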
5114         i = 0;
5115         for (node = ring_chain; node; node = node->next) {
5116                 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5117                 hnae3_set_field(tqp_type_and_id,  HCLGE_INT_TYPE_M,
5118                                 HCLGE_INT_TYPE_S,
5119                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5120                 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5121                                 HCLGE_TQP_ID_S, node->tqp_index);
5122                 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5123                                 HCLGE_INT_GL_IDX_S,
5124                                 hnae3_get_field(node->int_gl_idx,
5125                                                 HNAE3_RING_GL_IDX_M,
5126                                                 HNAE3_RING_GL_IDX_S));
5127                 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5128                 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5129                         req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5130                         req->vfid = vport->vport_id;
5131
5132                         status = hclge_cmd_send(&hdev->hw, &desc, 1);
5133                         if (status) {
5134                                 dev_err(&hdev->pdev->dev,
5135                                         "Map TQP fail, status is %d.\n",
5136                                         status);
5137                                 return -EIO;
5138                         }
5139                         i = 0;
5140
5141                         hclge_cmd_setup_basic_desc(&desc,
5142                                                    op,
5143                                                    false);
5144                         req->int_vector_id_l =
5145                                 hnae3_get_field(vector_id,
5146                                                 HCLGE_VECTOR_ID_L_M,
5147                                                 HCLGE_VECTOR_ID_L_S);
5148                         req->int_vector_id_h =
5149                                 hnae3_get_field(vector_id,
5150                                                 HCLGE_VECTOR_ID_H_M,
5151                                                 HCLGE_VECTOR_ID_H_S);
5152                 }
5153         }
5154
5155         if (i > 0) {
5156                 req->int_cause_num = i;
5157                 req->vfid = vport->vport_id;
5158                 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5159                 if (status) {
5160                         dev_err(&hdev->pdev->dev,
5161                                 "Map TQP fail, status is %d.\n", status);
5162                         return -EIO;
5163                 }
5164         }
5165
5166         return 0;
5167 }
5168
5169 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5170                                     struct hnae3_ring_chain_node *ring_chain)
5171 {
5172         struct hclge_vport *vport = hclge_get_vport(handle);
5173         struct hclge_dev *hdev = vport->back;
5174         int vector_id;
5175
5176         vector_id = hclge_get_vector_index(hdev, vector);
5177         if (vector_id < 0) {
5178                 dev_err(&hdev->pdev->dev,
5179                         "failed to get vector index. vector=%d\n", vector);
5180                 return vector_id;
5181         }
5182
5183         return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5184 }
5185
5186 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5187                                        struct hnae3_ring_chain_node *ring_chain)
5188 {
5189         struct hclge_vport *vport = hclge_get_vport(handle);
5190         struct hclge_dev *hdev = vport->back;
5191         int vector_id, ret;
5192
5193         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5194                 return 0;
5195
5196         vector_id = hclge_get_vector_index(hdev, vector);
5197         if (vector_id < 0) {
5198                 dev_err(&handle->pdev->dev,
5199                         "Get vector index fail. ret =%d\n", vector_id);
5200                 return vector_id;
5201         }
5202
5203         ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5204         if (ret)
5205                 dev_err(&handle->pdev->dev,
5206                         "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5207                         vector_id, ret);
5208
5209         return ret;
5210 }
5211
5212 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5213                                       bool en_uc, bool en_mc, bool en_bc)
5214 {
5215         struct hclge_vport *vport = &hdev->vport[vf_id];
5216         struct hnae3_handle *handle = &vport->nic;
5217         struct hclge_promisc_cfg_cmd *req;
5218         struct hclge_desc desc;
5219         bool uc_tx_en = en_uc;
5220         u8 promisc_cfg = 0;
5221         int ret;
5222
5223         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5224
5225         req = (struct hclge_promisc_cfg_cmd *)desc.data;
5226         req->vf_id = vf_id;
5227
5228         if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5229                 uc_tx_en = false;
5230
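        /* the extended field below carries separate rx/tx enables for
         * unicast, multicast and broadcast promiscuous traffic; the legacy
         * field further down keeps DEVICE_VERSION_V1/2 compatibility
         */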
5231         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5232         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5233         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5234         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5235         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5236         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5237         req->extend_promisc = promisc_cfg;
5238
5239         /* to be compatible with DEVICE_VERSION_V1/2 */
5240         promisc_cfg = 0;
5241         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5242         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5243         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5244         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5245         hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5246         req->promisc = promisc_cfg;
5247
5248         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5249         if (ret)
5250                 dev_err(&hdev->pdev->dev,
5251                         "failed to set vport %u promisc mode, ret = %d.\n",
5252                         vf_id, ret);
5253
5254         return ret;
5255 }
5256
5257 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5258                                  bool en_mc_pmc, bool en_bc_pmc)
5259 {
5260         return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5261                                           en_uc_pmc, en_mc_pmc, en_bc_pmc);
5262 }
5263
5264 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5265                                   bool en_mc_pmc)
5266 {
5267         struct hclge_vport *vport = hclge_get_vport(handle);
5268         struct hclge_dev *hdev = vport->back;
5269         bool en_bc_pmc = true;
5270
5271         /* For devices whose version is below V2, the vlan filter is always
5272          * bypassed when broadcast promisc is enabled. So broadcast promisc
5273          * should be disabled until the user enables promisc mode.
5274          */
5275         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5276                 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5277
5278         return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5279                                             en_bc_pmc);
5280 }
5281
5282 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5283 {
5284         struct hclge_vport *vport = hclge_get_vport(handle);
5285
5286         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5287 }
5288
5289 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5290 {
5291         if (hlist_empty(&hdev->fd_rule_list))
5292                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5293 }
5294
5295 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5296 {
5297         if (!test_bit(location, hdev->fd_bmap)) {
5298                 set_bit(location, hdev->fd_bmap);
5299                 hdev->hclge_fd_rule_num++;
5300         }
5301 }
5302
5303 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5304 {
5305         if (test_bit(location, hdev->fd_bmap)) {
5306                 clear_bit(location, hdev->fd_bmap);
5307                 hdev->hclge_fd_rule_num--;
5308         }
5309 }
5310
5311 static void hclge_fd_free_node(struct hclge_dev *hdev,
5312                                struct hclge_fd_rule *rule)
5313 {
5314         hlist_del(&rule->rule_node);
5315         kfree(rule);
5316         hclge_sync_fd_state(hdev);
5317 }
5318
5319 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5320                                       struct hclge_fd_rule *old_rule,
5321                                       struct hclge_fd_rule *new_rule,
5322                                       enum HCLGE_FD_NODE_STATE state)
5323 {
5324         switch (state) {
5325         case HCLGE_FD_TO_ADD:
5326         case HCLGE_FD_ACTIVE:
5327                 /* 1) if the new state is TO_ADD, just replace the old rule
5328                  * with the same location, no matter its state, because the
5329                  * new rule will be configured to the hardware.
5330                  * 2) if the new state is ACTIVE, it means the new rule
5331                  * has been configured to the hardware, so just replace
5332                  * the old rule node with the same location.
5333                  * 3) since neither case adds a new node to the list, it is
5334                  * unnecessary to update the rule number and fd_bmap.
5335                  */
5336                 new_rule->rule_node.next = old_rule->rule_node.next;
5337                 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5338                 memcpy(old_rule, new_rule, sizeof(*old_rule));
5339                 kfree(new_rule);
5340                 break;
5341         case HCLGE_FD_DELETED:
5342                 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5343                 hclge_fd_free_node(hdev, old_rule);
5344                 break;
5345         case HCLGE_FD_TO_DEL:
5346                 /* if the new request is TO_DEL and an old rule exists:
5347                  * 1) if the state of the old rule is TO_DEL, we need to do
5348                  * nothing, because we delete rules by location and the rest of
5349                  * the rule content is unnecessary.
5350                  * 2) if the state of the old rule is ACTIVE, we need to change
5351                  * its state to TO_DEL, so the rule will be deleted when the
5352                  * periodic task is scheduled.
5353                  * 3) if the state of the old rule is TO_ADD, the rule hasn't
5354                  * been added to hardware yet, so we just delete the rule node
5355                  * from fd_rule_list directly.
5356                  */
5357                 if (old_rule->state == HCLGE_FD_TO_ADD) {
5358                         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5359                         hclge_fd_free_node(hdev, old_rule);
5360                         return;
5361                 }
5362                 old_rule->state = HCLGE_FD_TO_DEL;
5363                 break;
5364         }
5365 }
5366
5367 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5368                                                 u16 location,
5369                                                 struct hclge_fd_rule **parent)
5370 {
5371         struct hclge_fd_rule *rule;
5372         struct hlist_node *node;
5373
5374         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5375                 if (rule->location == location)
5376                         return rule;
5377                 else if (rule->location > location)
5378                         return NULL;
5379                 /* record the parent node, used to keep the nodes in
5380                  * fd_rule_list in ascending order.
5381                  */
5382                 *parent = rule;
5383         }
5384
5385         return NULL;
5386 }
5387
5388 /* insert fd rule node in ascending order according to rule->location */
5389 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5390                                       struct hclge_fd_rule *rule,
5391                                       struct hclge_fd_rule *parent)
5392 {
5393         INIT_HLIST_NODE(&rule->rule_node);
5394
5395         if (parent)
5396                 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5397         else
5398                 hlist_add_head(&rule->rule_node, hlist);
5399 }
5400
5401 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5402                                      struct hclge_fd_user_def_cfg *cfg)
5403 {
5404         struct hclge_fd_user_def_cfg_cmd *req;
5405         struct hclge_desc desc;
5406         u16 data = 0;
5407         int ret;
5408
5409         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5410
5411         req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5412
5413         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5414         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5415                         HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5416         req->ol2_cfg = cpu_to_le16(data);
5417
5418         data = 0;
5419         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5420         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5421                         HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5422         req->ol3_cfg = cpu_to_le16(data);
5423
5424         data = 0;
5425         hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5426         hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5427                         HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5428         req->ol4_cfg = cpu_to_le16(data);
5429
5430         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5431         if (ret)
5432                 dev_err(&hdev->pdev->dev,
5433                         "failed to set fd user def data, ret= %d\n", ret);
5434         return ret;
5435 }
5436
5437 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5438 {
5439         int ret;
5440
5441         if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5442                 return;
5443
5444         if (!locked)
5445                 spin_lock_bh(&hdev->fd_rule_lock);
5446
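        /* push the pending user-def config to hardware; on failure the
         * CHANGED flag is set again so the write is retried on a later sync
         */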
5447         ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5448         if (ret)
5449                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5450
5451         if (!locked)
5452                 spin_unlock_bh(&hdev->fd_rule_lock);
5453 }
5454
5455 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5456                                           struct hclge_fd_rule *rule)
5457 {
5458         struct hlist_head *hlist = &hdev->fd_rule_list;
5459         struct hclge_fd_rule *fd_rule, *parent = NULL;
5460         struct hclge_fd_user_def_info *info, *old_info;
5461         struct hclge_fd_user_def_cfg *cfg;
5462
5463         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5464             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5465                 return 0;
5466
5467         /* valid layers start from 1, so subtract 1 to get the cfg index */
5468         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5469         info = &rule->ep.user_def;
5470
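        /* each layer supports only one user-def offset, shared by all rules
         * on that layer: accept the rule if the offset is unused or matches
         * the existing one, or if the only reference to the old offset comes
         * from the rule being replaced at this location
         */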
5471         if (!cfg->ref_cnt || cfg->offset == info->offset)
5472                 return 0;
5473
5474         if (cfg->ref_cnt > 1)
5475                 goto error;
5476
5477         fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5478         if (fd_rule) {
5479                 old_info = &fd_rule->ep.user_def;
5480                 if (info->layer == old_info->layer)
5481                         return 0;
5482         }
5483
5484 error:
5485         dev_err(&hdev->pdev->dev,
5486                 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5487                 info->layer + 1);
5488         return -ENOSPC;
5489 }
5490
5491 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5492                                          struct hclge_fd_rule *rule)
5493 {
5494         struct hclge_fd_user_def_cfg *cfg;
5495
5496         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5497             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5498                 return;
5499
5500         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5501         if (!cfg->ref_cnt) {
5502                 cfg->offset = rule->ep.user_def.offset;
5503                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5504         }
5505         cfg->ref_cnt++;
5506 }
5507
5508 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5509                                          struct hclge_fd_rule *rule)
5510 {
5511         struct hclge_fd_user_def_cfg *cfg;
5512
5513         if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5514             rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5515                 return;
5516
5517         cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5518         if (!cfg->ref_cnt)
5519                 return;
5520
5521         cfg->ref_cnt--;
5522         if (!cfg->ref_cnt) {
5523                 cfg->offset = 0;
5524                 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5525         }
5526 }
5527
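/* Update the software rule list for @location: refresh the user-def
 * refcounts, then either move the existing node to @state or insert a new
 * node. Deleting a rule that is not in the list only logs a warning.
 * Called with fd_rule_lock held.
 */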
5528 static void hclge_update_fd_list(struct hclge_dev *hdev,
5529                                  enum HCLGE_FD_NODE_STATE state, u16 location,
5530                                  struct hclge_fd_rule *new_rule)
5531 {
5532         struct hlist_head *hlist = &hdev->fd_rule_list;
5533         struct hclge_fd_rule *fd_rule, *parent = NULL;
5534
5535         fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5536         if (fd_rule) {
5537                 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5538                 if (state == HCLGE_FD_ACTIVE)
5539                         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5540                 hclge_sync_fd_user_def_cfg(hdev, true);
5541
5542                 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5543                 return;
5544         }
5545
5546         /* it's unlikely to fail here, because we have checked that the
5547          * rule exists before.
5548          */
5549         if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5550                 dev_warn(&hdev->pdev->dev,
5551                          "failed to delete fd rule %u, it does not exist\n",
5552                          location);
5553                 return;
5554         }
5555
5556         hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5557         hclge_sync_fd_user_def_cfg(hdev, true);
5558
5559         hclge_fd_insert_rule_node(hlist, new_rule, parent);
5560         hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5561
5562         if (state == HCLGE_FD_TO_ADD) {
5563                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5564                 hclge_task_schedule(hdev, 0);
5565         }
5566 }
5567
5568 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5569 {
5570         struct hclge_get_fd_mode_cmd *req;
5571         struct hclge_desc desc;
5572         int ret;
5573
5574         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5575
5576         req = (struct hclge_get_fd_mode_cmd *)desc.data;
5577
5578         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5579         if (ret) {
5580                 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5581                 return ret;
5582         }
5583
5584         *fd_mode = req->mode;
5585
5586         return ret;
5587 }
5588
5589 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5590                                    u32 *stage1_entry_num,
5591                                    u32 *stage2_entry_num,
5592                                    u16 *stage1_counter_num,
5593                                    u16 *stage2_counter_num)
5594 {
5595         struct hclge_get_fd_allocation_cmd *req;
5596         struct hclge_desc desc;
5597         int ret;
5598
5599         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5600
5601         req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5602
5603         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5604         if (ret) {
5605                 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5606                         ret);
5607                 return ret;
5608         }
5609
5610         *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5611         *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5612         *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5613         *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5614
5615         return ret;
5616 }
5617
5618 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5619                                    enum HCLGE_FD_STAGE stage_num)
5620 {
5621         struct hclge_set_fd_key_config_cmd *req;
5622         struct hclge_fd_key_cfg *stage;
5623         struct hclge_desc desc;
5624         int ret;
5625
5626         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5627
5628         req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5629         stage = &hdev->fd_cfg.key_cfg[stage_num];
5630         req->stage = stage_num;
5631         req->key_select = stage->key_sel;
5632         req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5633         req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5634         req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5635         req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5636         req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5637         req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5638
5639         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5640         if (ret)
5641                 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5642
5643         return ret;
5644 }
5645
5646 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5647 {
5648         struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5649
5650         spin_lock_bh(&hdev->fd_rule_lock);
5651         memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5652         spin_unlock_bh(&hdev->fd_rule_lock);
5653
5654         hclge_fd_set_user_def_cmd(hdev, cfg);
5655 }
5656
5657 static int hclge_init_fd_config(struct hclge_dev *hdev)
5658 {
5659 #define LOW_2_WORDS             0x03
5660         struct hclge_fd_key_cfg *key_cfg;
5661         int ret;
5662
5663         if (!hnae3_dev_fd_supported(hdev))
5664                 return 0;
5665
5666         ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5667         if (ret)
5668                 return ret;
5669
5670         switch (hdev->fd_cfg.fd_mode) {
5671         case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5672                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5673                 break;
5674         case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5675                 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5676                 break;
5677         default:
5678                 dev_err(&hdev->pdev->dev,
5679                         "Unsupported flow director mode %u\n",
5680                         hdev->fd_cfg.fd_mode);
5681                 return -EOPNOTSUPP;
5682         }
5683
5684         key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5685         key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5686         key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5687         key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5688         key_cfg->outer_sipv6_word_en = 0;
5689         key_cfg->outer_dipv6_word_en = 0;
5690
5691         key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5692                                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5693                                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5694                                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5695
5696         /* If using the max 400 bit key, we can support tuples for ether type */
5697         if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5698                 key_cfg->tuple_active |=
5699                                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5700                 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5701                         key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5702         }
5703
5704         /* roce_type is used to filter roce frames
5705          * dst_vport is used to specify the rule
5706          */
5707         key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5708
5709         ret = hclge_get_fd_allocation(hdev,
5710                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5711                                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5712                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5713                                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5714         if (ret)
5715                 return ret;
5716
5717         return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5718 }
5719
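/* Write one half of a TCAM entry (X or Y, selected by @sel_x) for location
 * @loc using three chained command descriptors. @key may be NULL when only
 * the valid bit is being updated, e.g. when an entry is deleted
 * (@is_add == false).
 */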
5720 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5721                                 int loc, u8 *key, bool is_add)
5722 {
5723         struct hclge_fd_tcam_config_1_cmd *req1;
5724         struct hclge_fd_tcam_config_2_cmd *req2;
5725         struct hclge_fd_tcam_config_3_cmd *req3;
5726         struct hclge_desc desc[3];
5727         int ret;
5728
5729         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5730         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5731         hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5732         desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5733         hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5734
5735         req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5736         req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5737         req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5738
5739         req1->stage = stage;
5740         req1->xy_sel = sel_x ? 1 : 0;
5741         hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5742         req1->index = cpu_to_le32(loc);
5743         req1->entry_vld = sel_x ? is_add : 0;
5744
5745         if (key) {
5746                 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5747                 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5748                        sizeof(req2->tcam_data));
5749                 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5750                        sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5751         }
5752
5753         ret = hclge_cmd_send(&hdev->hw, desc, 3);
5754         if (ret)
5755                 dev_err(&hdev->pdev->dev,
5756                         "config tcam key fail, ret=%d\n",
5757                         ret);
5758
5759         return ret;
5760 }
5761
5762 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5763                               struct hclge_fd_ad_data *action)
5764 {
5765         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5766         struct hclge_fd_ad_config_cmd *req;
5767         struct hclge_desc desc;
5768         u64 ad_data = 0;
5769         int ret;
5770
5771         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5772
5773         req = (struct hclge_fd_ad_config_cmd *)desc.data;
5774         req->index = cpu_to_le32(loc);
5775         req->stage = stage;
5776
5777         hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5778                       action->write_rule_id_to_bd);
5779         hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5780                         action->rule_id);
5781         if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5782                 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5783                               action->override_tc);
5784                 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5785                                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5786         }
5787         ad_data <<= 32;
5788         hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5789         hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5790                       action->forward_to_direct_queue);
5791         hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5792                         action->queue_id);
5793         hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5794         hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5795                         HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5796         hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5797         hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5798                         action->counter_id);
5799
5800         req->ad_data = cpu_to_le64(ad_data);
5801         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5802         if (ret)
5803                 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5804
5805         return ret;
5806 }
5807
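/* Convert one tuple of @rule into TCAM key_x/key_y bytes according to its
 * key option (u8, le16, le32, MAC or IP). Returns true when the tuple's key
 * bytes are consumed (including unused tuples, whose bytes stay zero) so the
 * caller advances past them; returns false for an unknown key option.
 */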
5808 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5809                                    struct hclge_fd_rule *rule)
5810 {
5811         int offset, moffset, ip_offset;
5812         enum HCLGE_FD_KEY_OPT key_opt;
5813         u16 tmp_x_s, tmp_y_s;
5814         u32 tmp_x_l, tmp_y_l;
5815         u8 *p = (u8 *)rule;
5816         int i;
5817
5818         if (rule->unused_tuple & BIT(tuple_bit))
5819                 return true;
5820
5821         key_opt = tuple_key_info[tuple_bit].key_opt;
5822         offset = tuple_key_info[tuple_bit].offset;
5823         moffset = tuple_key_info[tuple_bit].moffset;
5824
5825         switch (key_opt) {
5826         case KEY_OPT_U8:
5827                 calc_x(*key_x, p[offset], p[moffset]);
5828                 calc_y(*key_y, p[offset], p[moffset]);
5829
5830                 return true;
5831         case KEY_OPT_LE16:
5832                 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5833                 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5834                 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5835                 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5836
5837                 return true;
5838         case KEY_OPT_LE32:
5839                 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5840                 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5841                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5842                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5843
5844                 return true;
5845         case KEY_OPT_MAC:
5846                 for (i = 0; i < ETH_ALEN; i++) {
5847                         calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5848                                p[moffset + i]);
5849                         calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5850                                p[moffset + i]);
5851                 }
5852
5853                 return true;
5854         case KEY_OPT_IP:
5855                 ip_offset = IPV4_INDEX * sizeof(u32);
5856                 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5857                        *(u32 *)(&p[moffset + ip_offset]));
5858                 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5859                        *(u32 *)(&p[moffset + ip_offset]));
5860                 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5861                 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5862
5863                 return true;
5864         default:
5865                 return false;
5866         }
5867 }
5868
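/* Build the port number field used in the meta data key: for HOST_PORT it
 * encodes the pf_id and vf_id, otherwise the physical network port id; the
 * port type bit distinguishes the two.
 */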
5869 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5870                                  u8 vf_id, u8 network_port_id)
5871 {
5872         u32 port_number = 0;
5873
5874         if (port_type == HOST_PORT) {
5875                 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5876                                 pf_id);
5877                 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5878                                 vf_id);
5879                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5880         } else {
5881                 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5882                                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5883                 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5884         }
5885
5886         return port_number;
5887 }
5888
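/* Build the 32 bit meta data (NIC packet type plus the destination vport of
 * the rule's VF), convert it to the X/Y key form and shift it so the used
 * bits sit at the most significant end of the word.
 */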
5889 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5890                                        __le32 *key_x, __le32 *key_y,
5891                                        struct hclge_fd_rule *rule)
5892 {
5893         u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5894         u8 cur_pos = 0, tuple_size, shift_bits;
5895         unsigned int i;
5896
5897         for (i = 0; i < MAX_META_DATA; i++) {
5898                 tuple_size = meta_data_key_info[i].key_length;
5899                 tuple_bit = key_cfg->meta_data_active & BIT(i);
5900
5901                 switch (tuple_bit) {
5902                 case BIT(ROCE_TYPE):
5903                         hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5904                         cur_pos += tuple_size;
5905                         break;
5906                 case BIT(DST_VPORT):
5907                         port_number = hclge_get_port_number(HOST_PORT, 0,
5908                                                             rule->vf_id, 0);
5909                         hnae3_set_field(meta_data,
5910                                         GENMASK(cur_pos + tuple_size, cur_pos),
5911                                         cur_pos, port_number);
5912                         cur_pos += tuple_size;
5913                         break;
5914                 default:
5915                         break;
5916                 }
5917         }
5918
5919         calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5920         calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5921         shift_bits = sizeof(meta_data) * 8 - cur_pos;
5922
5923         *key_x = cpu_to_le32(tmp_x << shift_bits);
5924         *key_y = cpu_to_le32(tmp_y << shift_bits);
5925 }
5926
5927 /* A complete key is a combination of the meta data key and the tuple key.
5928  * The meta data key is stored at the MSB region, the tuple key is stored at
5929  * the LSB region, and unused bits are filled with 0.
5930  */
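/* Rough sketch of the layout built by hclge_config_key() below (not a
 * byte-exact map): active tuples are packed from byte 0 upwards in
 * tuple_key_info order, each taking key_length / 8 bytes, and the 32 bit
 * meta data is written at the start of the final MAX_META_DATA_LENGTH / 8
 * bytes of the key.
 */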
5931 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5932                             struct hclge_fd_rule *rule)
5933 {
5934         struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5935         u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5936         u8 *cur_key_x, *cur_key_y;
5937         u8 meta_data_region;
5938         u8 tuple_size;
5939         int ret;
5940         u32 i;
5941
5942         memset(key_x, 0, sizeof(key_x));
5943         memset(key_y, 0, sizeof(key_y));
5944         cur_key_x = key_x;
5945         cur_key_y = key_y;
5946
5947         for (i = 0; i < MAX_TUPLE; i++) {
5948                 bool tuple_valid;
5949
5950                 tuple_size = tuple_key_info[i].key_length / 8;
5951                 if (!(key_cfg->tuple_active & BIT(i)))
5952                         continue;
5953
5954                 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5955                                                      cur_key_y, rule);
5956                 if (tuple_valid) {
5957                         cur_key_x += tuple_size;
5958                         cur_key_y += tuple_size;
5959                 }
5960         }
5961
5962         meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5963                         MAX_META_DATA_LENGTH / 8;
5964
5965         hclge_fd_convert_meta_data(key_cfg,
5966                                    (__le32 *)(key_x + meta_data_region),
5967                                    (__le32 *)(key_y + meta_data_region),
5968                                    rule);
5969
5970         ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5971                                    true);
5972         if (ret) {
5973                 dev_err(&hdev->pdev->dev,
5974                         "fd key_y config fail, loc=%u, ret=%d\n",
5975                         rule->location, ret);
5976                 return ret;
5977         }
5978
5979         ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5980                                    true);
5981         if (ret)
5982                 dev_err(&hdev->pdev->dev,
5983                         "fd key_x config fail, loc=%u, ret=%d\n",
5984                         rule->location, ret);
5985         return ret;
5986 }
5987
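/* Translate the rule's action into TCAM action data: drop the packet, steer
 * it into a TC's queue range (queue offset plus log2 of the queue count), or
 * forward it to a single queue. A counter indexed by the VF id is attached
 * when stage 1 counters exist, and the rule location is written back to the
 * RX BD.
 */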
5988 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5989                                struct hclge_fd_rule *rule)
5990 {
5991         struct hclge_vport *vport = hdev->vport;
5992         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5993         struct hclge_fd_ad_data ad_data;
5994
5995         memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5996         ad_data.ad_id = rule->location;
5997
5998         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5999                 ad_data.drop_packet = true;
6000         } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6001                 ad_data.override_tc = true;
6002                 ad_data.queue_id =
6003                         kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6004                 ad_data.tc_size =
6005                         ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6006         } else {
6007                 ad_data.forward_to_direct_queue = true;
6008                 ad_data.queue_id = rule->queue_id;
6009         }
6010
6011         if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6012                 ad_data.use_counter = true;
6013                 ad_data.counter_id = rule->vf_id %
6014                                      hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6015         } else {
6016                 ad_data.use_counter = false;
6017                 ad_data.counter_id = 0;
6018         }
6019
6020         ad_data.use_next_stage = false;
6021         ad_data.next_input_key = 0;
6022
6023         ad_data.write_rule_id_to_bd = true;
6024         ad_data.rule_id = rule->location;
6025
6026         return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6027 }
6028
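/* The hclge_fd_check_*_tuple() helpers below validate one ethtool flow spec
 * variant and record every field the spec leaves wildcarded in
 * *unused_tuple, so it is excluded from the TCAM key.
 */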
6029 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6030                                        u32 *unused_tuple)
6031 {
6032         if (!spec || !unused_tuple)
6033                 return -EINVAL;
6034
6035         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6036
6037         if (!spec->ip4src)
6038                 *unused_tuple |= BIT(INNER_SRC_IP);
6039
6040         if (!spec->ip4dst)
6041                 *unused_tuple |= BIT(INNER_DST_IP);
6042
6043         if (!spec->psrc)
6044                 *unused_tuple |= BIT(INNER_SRC_PORT);
6045
6046         if (!spec->pdst)
6047                 *unused_tuple |= BIT(INNER_DST_PORT);
6048
6049         if (!spec->tos)
6050                 *unused_tuple |= BIT(INNER_IP_TOS);
6051
6052         return 0;
6053 }
6054
6055 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6056                                     u32 *unused_tuple)
6057 {
6058         if (!spec || !unused_tuple)
6059                 return -EINVAL;
6060
6061         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6062                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6063
6064         if (!spec->ip4src)
6065                 *unused_tuple |= BIT(INNER_SRC_IP);
6066
6067         if (!spec->ip4dst)
6068                 *unused_tuple |= BIT(INNER_DST_IP);
6069
6070         if (!spec->tos)
6071                 *unused_tuple |= BIT(INNER_IP_TOS);
6072
6073         if (!spec->proto)
6074                 *unused_tuple |= BIT(INNER_IP_PROTO);
6075
6076         if (spec->l4_4_bytes)
6077                 return -EOPNOTSUPP;
6078
6079         if (spec->ip_ver != ETH_RX_NFC_IP4)
6080                 return -EOPNOTSUPP;
6081
6082         return 0;
6083 }
6084
6085 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6086                                        u32 *unused_tuple)
6087 {
6088         if (!spec || !unused_tuple)
6089                 return -EINVAL;
6090
6091         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6092
6093         /* check whether the src/dst ip addresses are used */
6094         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6095                 *unused_tuple |= BIT(INNER_SRC_IP);
6096
6097         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6098                 *unused_tuple |= BIT(INNER_DST_IP);
6099
6100         if (!spec->psrc)
6101                 *unused_tuple |= BIT(INNER_SRC_PORT);
6102
6103         if (!spec->pdst)
6104                 *unused_tuple |= BIT(INNER_DST_PORT);
6105
6106         if (!spec->tclass)
6107                 *unused_tuple |= BIT(INNER_IP_TOS);
6108
6109         return 0;
6110 }
6111
6112 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6113                                     u32 *unused_tuple)
6114 {
6115         if (!spec || !unused_tuple)
6116                 return -EINVAL;
6117
6118         *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6119                         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6120
6121         /* check whether the src/dst ip addresses are used */
6122         if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6123                 *unused_tuple |= BIT(INNER_SRC_IP);
6124
6125         if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6126                 *unused_tuple |= BIT(INNER_DST_IP);
6127
6128         if (!spec->l4_proto)
6129                 *unused_tuple |= BIT(INNER_IP_PROTO);
6130
6131         if (!spec->tclass)
6132                 *unused_tuple |= BIT(INNER_IP_TOS);
6133
6134         if (spec->l4_4_bytes)
6135                 return -EOPNOTSUPP;
6136
6137         return 0;
6138 }
6139
6140 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6141 {
6142         if (!spec || !unused_tuple)
6143                 return -EINVAL;
6144
6145         *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6146                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6147                 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6148
6149         if (is_zero_ether_addr(spec->h_source))
6150                 *unused_tuple |= BIT(INNER_SRC_MAC);
6151
6152         if (is_zero_ether_addr(spec->h_dest))
6153                 *unused_tuple |= BIT(INNER_DST_MAC);
6154
6155         if (!spec->h_proto)
6156                 *unused_tuple |= BIT(INNER_ETH_TYPE);
6157
6158         return 0;
6159 }
6160
6161 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6162                                     struct ethtool_rx_flow_spec *fs,
6163                                     u32 *unused_tuple)
6164 {
6165         if (fs->flow_type & FLOW_EXT) {
6166                 if (fs->h_ext.vlan_etype) {
6167                         dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6168                         return -EOPNOTSUPP;
6169                 }
6170
6171                 if (!fs->h_ext.vlan_tci)
6172                         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6173
6174                 if (fs->m_ext.vlan_tci &&
6175                     be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6176                         dev_err(&hdev->pdev->dev,
6177                                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6178                                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6179                         return -EINVAL;
6180                 }
6181         } else {
6182                 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6183         }
6184
6185         if (fs->flow_type & FLOW_MAC_EXT) {
6186                 if (hdev->fd_cfg.fd_mode !=
6187                     HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6188                         dev_err(&hdev->pdev->dev,
6189                                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6190                         return -EOPNOTSUPP;
6191                 }
6192
6193                 if (is_zero_ether_addr(fs->h_ext.h_dest))
6194                         *unused_tuple |= BIT(INNER_DST_MAC);
6195                 else
6196                         *unused_tuple &= ~BIT(INNER_DST_MAC);
6197         }
6198
6199         return 0;
6200 }
6201
6202 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6203                                        struct hclge_fd_user_def_info *info)
6204 {
6205         switch (flow_type) {
6206         case ETHER_FLOW:
6207                 info->layer = HCLGE_FD_USER_DEF_L2;
6208                 *unused_tuple &= ~BIT(INNER_L2_RSV);
6209                 break;
6210         case IP_USER_FLOW:
6211         case IPV6_USER_FLOW:
6212                 info->layer = HCLGE_FD_USER_DEF_L3;
6213                 *unused_tuple &= ~BIT(INNER_L3_RSV);
6214                 break;
6215         case TCP_V4_FLOW:
6216         case UDP_V4_FLOW:
6217         case TCP_V6_FLOW:
6218         case UDP_V6_FLOW:
6219                 info->layer = HCLGE_FD_USER_DEF_L4;
6220                 *unused_tuple &= ~BIT(INNER_L4_RSV);
6221                 break;
6222         default:
6223                 return -EOPNOTSUPP;
6224         }
6225
6226         return 0;
6227 }
6228
6229 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6230 {
6231         return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6232 }
6233
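/* Parse the ethtool user-def field: the match data and its mask live in the
 * low 16 bits of data[1] / m_ext.data[1], the byte offset in data[0] (masked
 * with HCLGE_FD_USER_DEF_OFFSET). The offset must be unmasked and within
 * HCLGE_FD_MAX_USER_DEF_OFFSET, and the flow type selects the L2/L3/L4
 * user-def layer. For example (illustrative only), a 64 bit user-def value
 * of 0x400001234 requests data 0x1234 at offset 4.
 */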
6234 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6235                                          struct ethtool_rx_flow_spec *fs,
6236                                          u32 *unused_tuple,
6237                                          struct hclge_fd_user_def_info *info)
6238 {
6239         u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6240         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6241         u16 data, offset, data_mask, offset_mask;
6242         int ret;
6243
6244         info->layer = HCLGE_FD_USER_DEF_NONE;
6245         *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6246
6247         if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6248                 return 0;
6249
6250         /* the user-def data from ethtool is a 64 bit value, bit 0~15 is used
6251          * for data, and bit 32~47 is used for offset.
6252          */
6253         data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6254         data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6255         offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6256         offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6257
6258         if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6259                 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6260                 return -EOPNOTSUPP;
6261         }
6262
6263         if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6264                 dev_err(&hdev->pdev->dev,
6265                         "user-def offset[%u] should be no more than %u\n",
6266                         offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6267                 return -EINVAL;
6268         }
6269
6270         if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6271                 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6272                 return -EINVAL;
6273         }
6274
6275         ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6276         if (ret) {
6277                 dev_err(&hdev->pdev->dev,
6278                         "unsupported flow type for user-def bytes, ret = %d\n",
6279                         ret);
6280                 return ret;
6281         }
6282
6283         info->data = data;
6284         info->data_mask = data_mask;
6285         info->offset = offset;
6286
6287         return 0;
6288 }
6289
6290 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6291                                struct ethtool_rx_flow_spec *fs,
6292                                u32 *unused_tuple,
6293                                struct hclge_fd_user_def_info *info)
6294 {
6295         u32 flow_type;
6296         int ret;
6297
6298         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6299                 dev_err(&hdev->pdev->dev,
6300                         "failed to config fd rules, invalid rule location: %u, max is %u\n",
6301                         fs->location,
6302                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6303                 return -EINVAL;
6304         }
6305
6306         ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6307         if (ret)
6308                 return ret;
6309
6310         flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6311         switch (flow_type) {
6312         case SCTP_V4_FLOW:
6313         case TCP_V4_FLOW:
6314         case UDP_V4_FLOW:
6315                 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6316                                                   unused_tuple);
6317                 break;
6318         case IP_USER_FLOW:
6319                 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6320                                                unused_tuple);
6321                 break;
6322         case SCTP_V6_FLOW:
6323         case TCP_V6_FLOW:
6324         case UDP_V6_FLOW:
6325                 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6326                                                   unused_tuple);
6327                 break;
6328         case IPV6_USER_FLOW:
6329                 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6330                                                unused_tuple);
6331                 break;
6332         case ETHER_FLOW:
6333                 if (hdev->fd_cfg.fd_mode !=
6334                         HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6335                         dev_err(&hdev->pdev->dev,
6336                                 "ETHER_FLOW is not supported in current fd mode!\n");
6337                         return -EOPNOTSUPP;
6338                 }
6339
6340                 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6341                                                  unused_tuple);
6342                 break;
6343         default:
6344                 dev_err(&hdev->pdev->dev,
6345                         "unsupported protocol type, protocol type = %#x\n",
6346                         flow_type);
6347                 return -EOPNOTSUPP;
6348         }
6349
6350         if (ret) {
6351                 dev_err(&hdev->pdev->dev,
6352                         "failed to check flow union tuple, ret = %d\n",
6353                         ret);
6354                 return ret;
6355         }
6356
6357         return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6358 }
6359
6360 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6361                                       struct ethtool_rx_flow_spec *fs,
6362                                       struct hclge_fd_rule *rule, u8 ip_proto)
6363 {
6364         rule->tuples.src_ip[IPV4_INDEX] =
6365                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6366         rule->tuples_mask.src_ip[IPV4_INDEX] =
6367                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6368
6369         rule->tuples.dst_ip[IPV4_INDEX] =
6370                         be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6371         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6372                         be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6373
6374         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6375         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6376
6377         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6378         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6379
6380         rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6381         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6382
6383         rule->tuples.ether_proto = ETH_P_IP;
6384         rule->tuples_mask.ether_proto = 0xFFFF;
6385
6386         rule->tuples.ip_proto = ip_proto;
6387         rule->tuples_mask.ip_proto = 0xFF;
6388 }
6389
6390 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6391                                    struct ethtool_rx_flow_spec *fs,
6392                                    struct hclge_fd_rule *rule)
6393 {
6394         rule->tuples.src_ip[IPV4_INDEX] =
6395                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6396         rule->tuples_mask.src_ip[IPV4_INDEX] =
6397                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6398
6399         rule->tuples.dst_ip[IPV4_INDEX] =
6400                         be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6401         rule->tuples_mask.dst_ip[IPV4_INDEX] =
6402                         be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6403
6404         rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6405         rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6406
6407         rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6408         rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6409
6410         rule->tuples.ether_proto = ETH_P_IP;
6411         rule->tuples_mask.ether_proto = 0xFFFF;
6412 }
6413
6414 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6415                                       struct ethtool_rx_flow_spec *fs,
6416                                       struct hclge_fd_rule *rule, u8 ip_proto)
6417 {
6418         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6419                           IPV6_SIZE);
6420         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6421                           IPV6_SIZE);
6422
6423         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6424                           IPV6_SIZE);
6425         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6426                           IPV6_SIZE);
6427
6428         rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6429         rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6430
6431         rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6432         rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6433
6434         rule->tuples.ether_proto = ETH_P_IPV6;
6435         rule->tuples_mask.ether_proto = 0xFFFF;
6436
6437         rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6438         rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6439
6440         rule->tuples.ip_proto = ip_proto;
6441         rule->tuples_mask.ip_proto = 0xFF;
6442 }
6443
6444 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6445                                    struct ethtool_rx_flow_spec *fs,
6446                                    struct hclge_fd_rule *rule)
6447 {
6448         be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6449                           IPV6_SIZE);
6450         be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6451                           IPV6_SIZE);
6452
6453         be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6454                           IPV6_SIZE);
6455         be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6456                           IPV6_SIZE);
6457
6458         rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6459         rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6460
6461         rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6462         rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6463
6464         rule->tuples.ether_proto = ETH_P_IPV6;
6465         rule->tuples_mask.ether_proto = 0xFFFF;
6466 }
6467
6468 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6469                                      struct ethtool_rx_flow_spec *fs,
6470                                      struct hclge_fd_rule *rule)
6471 {
6472         ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6473         ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6474
6475         ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6476         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6477
6478         rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6479         rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6480 }
6481
6482 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6483                                         struct hclge_fd_rule *rule)
6484 {
6485         switch (info->layer) {
6486         case HCLGE_FD_USER_DEF_L2:
6487                 rule->tuples.l2_user_def = info->data;
6488                 rule->tuples_mask.l2_user_def = info->data_mask;
6489                 break;
6490         case HCLGE_FD_USER_DEF_L3:
6491                 rule->tuples.l3_user_def = info->data;
6492                 rule->tuples_mask.l3_user_def = info->data_mask;
6493                 break;
6494         case HCLGE_FD_USER_DEF_L4:
6495                 rule->tuples.l4_user_def = (u32)info->data << 16;
6496                 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6497                 break;
6498         default:
6499                 break;
6500         }
6501
6502         rule->ep.user_def = *info;
6503 }
6504
6505 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6506                               struct ethtool_rx_flow_spec *fs,
6507                               struct hclge_fd_rule *rule,
6508                               struct hclge_fd_user_def_info *info)
6509 {
6510         u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6511
6512         switch (flow_type) {
6513         case SCTP_V4_FLOW:
6514                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6515                 break;
6516         case TCP_V4_FLOW:
6517                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6518                 break;
6519         case UDP_V4_FLOW:
6520                 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6521                 break;
6522         case IP_USER_FLOW:
6523                 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6524                 break;
6525         case SCTP_V6_FLOW:
6526                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6527                 break;
6528         case TCP_V6_FLOW:
6529                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6530                 break;
6531         case UDP_V6_FLOW:
6532                 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6533                 break;
6534         case IPV6_USER_FLOW:
6535                 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6536                 break;
6537         case ETHER_FLOW:
6538                 hclge_fd_get_ether_tuple(hdev, fs, rule);
6539                 break;
6540         default:
6541                 return -EOPNOTSUPP;
6542         }
6543
6544         if (fs->flow_type & FLOW_EXT) {
6545                 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6546                 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6547                 hclge_fd_get_user_def_tuple(info, rule);
6548         }
6549
6550         if (fs->flow_type & FLOW_MAC_EXT) {
6551                 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6552                 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6553         }
6554
6555         return 0;
6556 }
6557
6558 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6559                                 struct hclge_fd_rule *rule)
6560 {
6561         int ret;
6562
6563         ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6564         if (ret)
6565                 return ret;
6566
6567         return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6568 }
6569
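/* Common add path: reject the rule when a different rule type is already
 * active (HCLGE_FD_EP_ACTIVE vs HCLGE_FD_TC_FLOWER_ACTIVE), verify the
 * user-def offset is available, flush any aRFS rules, then program the rule
 * to hardware and record it in the software list.
 */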
6570 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6571                                      struct hclge_fd_rule *rule)
6572 {
6573         int ret;
6574
6575         spin_lock_bh(&hdev->fd_rule_lock);
6576
6577         if (hdev->fd_active_type != rule->rule_type &&
6578             (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6579              hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6580                 dev_err(&hdev->pdev->dev,
6581                         "mode conflict (new type %d, active type %d), please delete existing rules first\n",
6582                         rule->rule_type, hdev->fd_active_type);
6583                 spin_unlock_bh(&hdev->fd_rule_lock);
6584                 return -EINVAL;
6585         }
6586
6587         ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6588         if (ret)
6589                 goto out;
6590
6591         ret = hclge_clear_arfs_rules(hdev);
6592         if (ret)
6593                 goto out;
6594
6595         ret = hclge_fd_config_rule(hdev, rule);
6596         if (ret)
6597                 goto out;
6598
6599         rule->state = HCLGE_FD_ACTIVE;
6600         hdev->fd_active_type = rule->rule_type;
6601         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6602
6603 out:
6604         spin_unlock_bh(&hdev->fd_rule_lock);
6605         return ret;
6606 }
6607
6608 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6609 {
6610         struct hclge_vport *vport = hclge_get_vport(handle);
6611         struct hclge_dev *hdev = vport->back;
6612
6613         return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6614 }
6615
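/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC selects the drop action;
 * otherwise the VF part picks the destination vport and the ring part the
 * destination queue, both checked against the configured limits.
 */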
6616 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6617                                       u16 *vport_id, u8 *action, u16 *queue_id)
6618 {
6619         struct hclge_vport *vport = hdev->vport;
6620
6621         if (ring_cookie == RX_CLS_FLOW_DISC) {
6622                 *action = HCLGE_FD_ACTION_DROP_PACKET;
6623         } else {
6624                 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6625                 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6626                 u16 tqps;
6627
6628                 if (vf > hdev->num_req_vfs) {
6629                         dev_err(&hdev->pdev->dev,
6630                                 "Error: vf id (%u) > max vf num (%u)\n",
6631                                 vf, hdev->num_req_vfs);
6632                         return -EINVAL;
6633                 }
6634
6635                 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6636                 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6637
6638                 if (ring >= tqps) {
6639                         dev_err(&hdev->pdev->dev,
6640                                 "Error: queue id (%u) > max tqp num (%u)\n",
6641                                 ring, tqps - 1);
6642                         return -EINVAL;
6643                 }
6644
6645                 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6646                 *queue_id = ring;
6647         }
6648
6649         return 0;
6650 }
6651
6652 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6653                               struct ethtool_rxnfc *cmd)
6654 {
6655         struct hclge_vport *vport = hclge_get_vport(handle);
6656         struct hclge_dev *hdev = vport->back;
6657         struct hclge_fd_user_def_info info;
6658         u16 dst_vport_id = 0, q_index = 0;
6659         struct ethtool_rx_flow_spec *fs;
6660         struct hclge_fd_rule *rule;
6661         u32 unused = 0;
6662         u8 action;
6663         int ret;
6664
6665         if (!hnae3_dev_fd_supported(hdev)) {
6666                 dev_err(&hdev->pdev->dev,
6667                         "flow director is not supported\n");
6668                 return -EOPNOTSUPP;
6669         }
6670
6671         if (!hdev->fd_en) {
6672                 dev_err(&hdev->pdev->dev,
6673                         "please enable flow director first\n");
6674                 return -EOPNOTSUPP;
6675         }
6676
6677         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6678
6679         ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6680         if (ret)
6681                 return ret;
6682
6683         ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6684                                          &action, &q_index);
6685         if (ret)
6686                 return ret;
6687
6688         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6689         if (!rule)
6690                 return -ENOMEM;
6691
6692         ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6693         if (ret) {
6694                 kfree(rule);
6695                 return ret;
6696         }
6697
6698         rule->flow_type = fs->flow_type;
6699         rule->location = fs->location;
6700         rule->unused_tuple = unused;
6701         rule->vf_id = dst_vport_id;
6702         rule->queue_id = q_index;
6703         rule->action = action;
6704         rule->rule_type = HCLGE_FD_EP_ACTIVE;
6705
6706         ret = hclge_add_fd_entry_common(hdev, rule);
6707         if (ret)
6708                 kfree(rule);
6709
6710         return ret;
6711 }
6712
6713 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6714                               struct ethtool_rxnfc *cmd)
6715 {
6716         struct hclge_vport *vport = hclge_get_vport(handle);
6717         struct hclge_dev *hdev = vport->back;
6718         struct ethtool_rx_flow_spec *fs;
6719         int ret;
6720
6721         if (!hnae3_dev_fd_supported(hdev))
6722                 return -EOPNOTSUPP;
6723
6724         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6725
6726         if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6727                 return -EINVAL;
6728
6729         spin_lock_bh(&hdev->fd_rule_lock);
6730         if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6731             !test_bit(fs->location, hdev->fd_bmap)) {
6732                 dev_err(&hdev->pdev->dev,
6733                         "Delete fail, rule %u does not exist\n", fs->location);
6734                 spin_unlock_bh(&hdev->fd_rule_lock);
6735                 return -ENOENT;
6736         }
6737
6738         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6739                                    NULL, false);
6740         if (ret)
6741                 goto out;
6742
6743         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6744
6745 out:
6746         spin_unlock_bh(&hdev->fd_rule_lock);
6747         return ret;
6748 }
6749
6750 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6751                                          bool clear_list)
6752 {
6753         struct hclge_fd_rule *rule;
6754         struct hlist_node *node;
6755         u16 location;
6756
6757         if (!hnae3_dev_fd_supported(hdev))
6758                 return;
6759
6760         spin_lock_bh(&hdev->fd_rule_lock);
6761
6762         for_each_set_bit(location, hdev->fd_bmap,
6763                          hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6764                 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6765                                      NULL, false);
6766
6767         if (clear_list) {
6768                 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6769                                           rule_node) {
6770                         hlist_del(&rule->rule_node);
6771                         kfree(rule);
6772                 }
6773                 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6774                 hdev->hclge_fd_rule_num = 0;
6775                 bitmap_zero(hdev->fd_bmap,
6776                             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6777         }
6778
6779         spin_unlock_bh(&hdev->fd_rule_lock);
6780 }
6781
6782 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6783 {
6784         hclge_clear_fd_rules_in_list(hdev, true);
6785         hclge_fd_disable_user_def(hdev);
6786 }
6787
6788 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6789 {
6790         struct hclge_vport *vport = hclge_get_vport(handle);
6791         struct hclge_dev *hdev = vport->back;
6792         struct hclge_fd_rule *rule;
6793         struct hlist_node *node;
6794
6795         /* Return ok here, because reset error handling will check this
6796          * return value. If an error is returned here, the reset process will
6797          * fail.
6798          */
6799         if (!hnae3_dev_fd_supported(hdev))
6800                 return 0;
6801
6802         /* if fd is disabled, it should not be restored during reset */
6803         if (!hdev->fd_en)
6804                 return 0;
6805
6806         spin_lock_bh(&hdev->fd_rule_lock);
6807         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6808                 if (rule->state == HCLGE_FD_ACTIVE)
6809                         rule->state = HCLGE_FD_TO_ADD;
6810         }
6811         spin_unlock_bh(&hdev->fd_rule_lock);
6812         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6813
6814         return 0;
6815 }
6816
6817 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6818                                  struct ethtool_rxnfc *cmd)
6819 {
6820         struct hclge_vport *vport = hclge_get_vport(handle);
6821         struct hclge_dev *hdev = vport->back;
6822
6823         if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6824                 return -EOPNOTSUPP;
6825
6826         cmd->rule_cnt = hdev->hclge_fd_rule_num;
6827         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6828
6829         return 0;
6830 }
6831
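/* The hclge_fd_get_*_info() helpers below are the inverse of
 * hclge_fd_get_*_tuple(): they convert a stored rule back into an ethtool
 * spec, reporting a zero mask for every tuple marked as unused.
 */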
6832 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6833                                      struct ethtool_tcpip4_spec *spec,
6834                                      struct ethtool_tcpip4_spec *spec_mask)
6835 {
6836         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6837         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6838                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6839
6840         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6841         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6842                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6843
6844         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6845         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6846                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6847
6848         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6849         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6850                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6851
6852         spec->tos = rule->tuples.ip_tos;
6853         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6854                         0 : rule->tuples_mask.ip_tos;
6855 }
6856
6857 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6858                                   struct ethtool_usrip4_spec *spec,
6859                                   struct ethtool_usrip4_spec *spec_mask)
6860 {
6861         spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6862         spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6863                         0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6864
6865         spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6866         spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6867                         0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6868
6869         spec->tos = rule->tuples.ip_tos;
6870         spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6871                         0 : rule->tuples_mask.ip_tos;
6872
6873         spec->proto = rule->tuples.ip_proto;
6874         spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6875                         0 : rule->tuples_mask.ip_proto;
6876
6877         spec->ip_ver = ETH_RX_NFC_IP4;
6878 }
6879
6880 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6881                                      struct ethtool_tcpip6_spec *spec,
6882                                      struct ethtool_tcpip6_spec *spec_mask)
6883 {
6884         cpu_to_be32_array(spec->ip6src,
6885                           rule->tuples.src_ip, IPV6_SIZE);
6886         cpu_to_be32_array(spec->ip6dst,
6887                           rule->tuples.dst_ip, IPV6_SIZE);
6888         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6889                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6890         else
6891                 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6892                                   IPV6_SIZE);
6893
6894         if (rule->unused_tuple & BIT(INNER_DST_IP))
6895                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6896         else
6897                 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6898                                   IPV6_SIZE);
6899
6900         spec->tclass = rule->tuples.ip_tos;
6901         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6902                         0 : rule->tuples_mask.ip_tos;
6903
6904         spec->psrc = cpu_to_be16(rule->tuples.src_port);
6905         spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6906                         0 : cpu_to_be16(rule->tuples_mask.src_port);
6907
6908         spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6909         spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6910                         0 : cpu_to_be16(rule->tuples_mask.dst_port);
6911 }
6912
6913 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6914                                   struct ethtool_usrip6_spec *spec,
6915                                   struct ethtool_usrip6_spec *spec_mask)
6916 {
6917         cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6918         cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6919         if (rule->unused_tuple & BIT(INNER_SRC_IP))
6920                 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6921         else
6922                 cpu_to_be32_array(spec_mask->ip6src,
6923                                   rule->tuples_mask.src_ip, IPV6_SIZE);
6924
6925         if (rule->unused_tuple & BIT(INNER_DST_IP))
6926                 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6927         else
6928                 cpu_to_be32_array(spec_mask->ip6dst,
6929                                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6930
6931         spec->tclass = rule->tuples.ip_tos;
6932         spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6933                         0 : rule->tuples_mask.ip_tos;
6934
6935         spec->l4_proto = rule->tuples.ip_proto;
6936         spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6937                         0 : rule->tuples_mask.ip_proto;
6938 }
6939
6940 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6941                                     struct ethhdr *spec,
6942                                     struct ethhdr *spec_mask)
6943 {
6944         ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6945         ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6946
6947         if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6948                 eth_zero_addr(spec_mask->h_source);
6949         else
6950                 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6951
6952         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6953                 eth_zero_addr(spec_mask->h_dest);
6954         else
6955                 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6956
6957         spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6958         spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6959                         0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6960 }
6961
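/* Report the user-defined part of an fd rule in the ethtool ext fields:
 * zeroed when both user-def tuples are unused, otherwise the stored
 * offset, data and data mask.
 */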
6962 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6963                                        struct hclge_fd_rule *rule)
6964 {
6965         if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6966             HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6967                 fs->h_ext.data[0] = 0;
6968                 fs->h_ext.data[1] = 0;
6969                 fs->m_ext.data[0] = 0;
6970                 fs->m_ext.data[1] = 0;
6971         } else {
6972                 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6973                 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6974                 fs->m_ext.data[0] =
6975                                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6976                 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6977         }
6978 }
6979
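/* Fill the FLOW_EXT (vlan tci and user-def data) and FLOW_MAC_EXT
 * (destination mac) fields of the ethtool flow spec from an fd rule.
 */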
6980 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6981                                   struct hclge_fd_rule *rule)
6982 {
6983         if (fs->flow_type & FLOW_EXT) {
6984                 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6985                 fs->m_ext.vlan_tci =
6986                                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6987                                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6988
6989                 hclge_fd_get_user_def_info(fs, rule);
6990         }
6991
6992         if (fs->flow_type & FLOW_MAC_EXT) {
6993                 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6994                 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6995                         eth_zero_addr(fs->m_u.ether_spec.h_dest);
6996                 else
6997                         ether_addr_copy(fs->m_u.ether_spec.h_dest,
6998                                         rule->tuples_mask.dst_mac);
6999         }
7000 }
7001
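/* Look up the fd rule at fs->location and translate it back into an
 * ethtool_rx_flow_spec, including the drop action or ring cookie and
 * VF id.
 */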
7002 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7003                                   struct ethtool_rxnfc *cmd)
7004 {
7005         struct hclge_vport *vport = hclge_get_vport(handle);
7006         struct hclge_fd_rule *rule = NULL;
7007         struct hclge_dev *hdev = vport->back;
7008         struct ethtool_rx_flow_spec *fs;
7009         struct hlist_node *node2;
7010
7011         if (!hnae3_dev_fd_supported(hdev))
7012                 return -EOPNOTSUPP;
7013
7014         fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7015
7016         spin_lock_bh(&hdev->fd_rule_lock);
7017
7018         hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7019                 if (rule->location >= fs->location)
7020                         break;
7021         }
7022
7023         if (!rule || fs->location != rule->location) {
7024                 spin_unlock_bh(&hdev->fd_rule_lock);
7025
7026                 return -ENOENT;
7027         }
7028
7029         fs->flow_type = rule->flow_type;
7030         switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7031         case SCTP_V4_FLOW:
7032         case TCP_V4_FLOW:
7033         case UDP_V4_FLOW:
7034                 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7035                                          &fs->m_u.tcp_ip4_spec);
7036                 break;
7037         case IP_USER_FLOW:
7038                 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7039                                       &fs->m_u.usr_ip4_spec);
7040                 break;
7041         case SCTP_V6_FLOW:
7042         case TCP_V6_FLOW:
7043         case UDP_V6_FLOW:
7044                 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7045                                          &fs->m_u.tcp_ip6_spec);
7046                 break;
7047         case IPV6_USER_FLOW:
7048                 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7049                                       &fs->m_u.usr_ip6_spec);
7050                 break;
7051         /* The flow type of the fd rule has been checked before it was
7052          * added to the rule list. Since all other flow types have been
7053          * handled above, it must be ETHER_FLOW in the default case.
7054          */
7055         default:
7056                 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7057                                         &fs->m_u.ether_spec);
7058                 break;
7059         }
7060
7061         hclge_fd_get_ext_info(fs, rule);
7062
7063         if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7064                 fs->ring_cookie = RX_CLS_FLOW_DISC;
7065         } else {
7066                 u64 vf_id;
7067
7068                 fs->ring_cookie = rule->queue_id;
7069                 vf_id = rule->vf_id;
7070                 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7071                 fs->ring_cookie |= vf_id;
7072         }
7073
7074         spin_unlock_bh(&hdev->fd_rule_lock);
7075
7076         return 0;
7077 }
7078
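/* Report the locations of all fd rules that are not pending deletion;
 * returns -EMSGSIZE if there are more rules than cmd->rule_cnt allows.
 */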
7079 static int hclge_get_all_rules(struct hnae3_handle *handle,
7080                                struct ethtool_rxnfc *cmd, u32 *rule_locs)
7081 {
7082         struct hclge_vport *vport = hclge_get_vport(handle);
7083         struct hclge_dev *hdev = vport->back;
7084         struct hclge_fd_rule *rule;
7085         struct hlist_node *node2;
7086         int cnt = 0;
7087
7088         if (!hnae3_dev_fd_supported(hdev))
7089                 return -EOPNOTSUPP;
7090
7091         cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7092
7093         spin_lock_bh(&hdev->fd_rule_lock);
7094         hlist_for_each_entry_safe(rule, node2,
7095                                   &hdev->fd_rule_list, rule_node) {
7096                 if (cnt == cmd->rule_cnt) {
7097                         spin_unlock_bh(&hdev->fd_rule_lock);
7098                         return -EMSGSIZE;
7099                 }
7100
7101                 if (rule->state == HCLGE_FD_TO_DEL)
7102                         continue;
7103
7104                 rule_locs[cnt] = rule->location;
7105                 cnt++;
7106         }
7107
7108         spin_unlock_bh(&hdev->fd_rule_lock);
7109
7110         cmd->rule_cnt = cnt;
7111
7112         return 0;
7113 }
7114
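/* Extract fd rule tuples (ether proto, ip proto, destination port and
 * IPv4/IPv6 addresses) from dissected flow keys for aRFS.
 */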
7115 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7116                                      struct hclge_fd_rule_tuples *tuples)
7117 {
7118 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7119 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7120
7121         tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7122         tuples->ip_proto = fkeys->basic.ip_proto;
7123         tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7124
7125         if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7126                 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7127                 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7128         } else {
7129                 int i;
7130
7131                 for (i = 0; i < IPV6_SIZE; i++) {
7132                         tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7133                         tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7134                 }
7135         }
7136 }
7137
7138 /* traverse all rules, check whether an existing rule has the same tuples */
7139 static struct hclge_fd_rule *
7140 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7141                           const struct hclge_fd_rule_tuples *tuples)
7142 {
7143         struct hclge_fd_rule *rule = NULL;
7144         struct hlist_node *node;
7145
7146         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7147                 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7148                         return rule;
7149         }
7150
7151         return NULL;
7152 }
7153
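/* Build an aRFS fd rule from the extracted tuples: mac, vlan, tos and
 * source port are marked unused, and a full mask is applied to the rest.
 */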
7154 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7155                                      struct hclge_fd_rule *rule)
7156 {
7157         rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7158                              BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7159                              BIT(INNER_SRC_PORT);
7160         rule->action = 0;
7161         rule->vf_id = 0;
7162         rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7163         rule->state = HCLGE_FD_TO_ADD;
7164         if (tuples->ether_proto == ETH_P_IP) {
7165                 if (tuples->ip_proto == IPPROTO_TCP)
7166                         rule->flow_type = TCP_V4_FLOW;
7167                 else
7168                         rule->flow_type = UDP_V4_FLOW;
7169         } else {
7170                 if (tuples->ip_proto == IPPROTO_TCP)
7171                         rule->flow_type = TCP_V6_FLOW;
7172                 else
7173                         rule->flow_type = UDP_V6_FLOW;
7174         }
7175         memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7176         memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7177 }
7178
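/* aRFS add entry: create an fd rule for the dissected flow or move an
 * existing one to the new queue. Not allowed while user-configured fd
 * rules are active. Returns the rule location on success.
 */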
7179 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7180                                       u16 flow_id, struct flow_keys *fkeys)
7181 {
7182         struct hclge_vport *vport = hclge_get_vport(handle);
7183         struct hclge_fd_rule_tuples new_tuples = {};
7184         struct hclge_dev *hdev = vport->back;
7185         struct hclge_fd_rule *rule;
7186         u16 bit_id;
7187
7188         if (!hnae3_dev_fd_supported(hdev))
7189                 return -EOPNOTSUPP;
7190
7191         /* when there is already an fd rule added by the user,
7192          * arfs should not work
7193          */
7194         spin_lock_bh(&hdev->fd_rule_lock);
7195         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7196             hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7197                 spin_unlock_bh(&hdev->fd_rule_lock);
7198                 return -EOPNOTSUPP;
7199         }
7200
7201         hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7202
7203         /* check whether a flow director filter already exists for this
7204          * flow: if not, create a new filter for it; if a filter exists
7205          * with a different queue id, modify the filter; if a filter
7206          * exists with the same queue id, do nothing
7207          */
7208         rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7209         if (!rule) {
7210                 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7211                 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7212                         spin_unlock_bh(&hdev->fd_rule_lock);
7213                         return -ENOSPC;
7214                 }
7215
7216                 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7217                 if (!rule) {
7218                         spin_unlock_bh(&hdev->fd_rule_lock);
7219                         return -ENOMEM;
7220                 }
7221
7222                 rule->location = bit_id;
7223                 rule->arfs.flow_id = flow_id;
7224                 rule->queue_id = queue_id;
7225                 hclge_fd_build_arfs_rule(&new_tuples, rule);
7226                 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7227                 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7228         } else if (rule->queue_id != queue_id) {
7229                 rule->queue_id = queue_id;
7230                 rule->state = HCLGE_FD_TO_ADD;
7231                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7232                 hclge_task_schedule(hdev, 0);
7233         }
7234         spin_unlock_bh(&hdev->fd_rule_lock);
7235         return rule->location;
7236 }
7237
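/* Mark aRFS rules that rps_may_expire_flow() reports as expired for
 * deletion; built only when CONFIG_RFS_ACCEL is enabled.
 */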
7238 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7239 {
7240 #ifdef CONFIG_RFS_ACCEL
7241         struct hnae3_handle *handle = &hdev->vport[0].nic;
7242         struct hclge_fd_rule *rule;
7243         struct hlist_node *node;
7244
7245         spin_lock_bh(&hdev->fd_rule_lock);
7246         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7247                 spin_unlock_bh(&hdev->fd_rule_lock);
7248                 return;
7249         }
7250         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7251                 if (rule->state != HCLGE_FD_ACTIVE)
7252                         continue;
7253                 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7254                                         rule->arfs.flow_id, rule->location)) {
7255                         rule->state = HCLGE_FD_TO_DEL;
7256                         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7257                 }
7258         }
7259         spin_unlock_bh(&hdev->fd_rule_lock);
7260 #endif
7261 }
7262
7263 /* the caller must hold fd_rule_lock when calling this function */
7264 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7265 {
7266 #ifdef CONFIG_RFS_ACCEL
7267         struct hclge_fd_rule *rule;
7268         struct hlist_node *node;
7269         int ret;
7270
7271         if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7272                 return 0;
7273
7274         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7275                 switch (rule->state) {
7276                 case HCLGE_FD_TO_DEL:
7277                 case HCLGE_FD_ACTIVE:
7278                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7279                                                    rule->location, NULL, false);
7280                         if (ret)
7281                                 return ret;
7282                         fallthrough;
7283                 case HCLGE_FD_TO_ADD:
7284                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7285                         hlist_del(&rule->rule_node);
7286                         kfree(rule);
7287                         break;
7288                 default:
7289                         break;
7290                 }
7291         }
7292         hclge_sync_fd_state(hdev);
7293
7294 #endif
7295         return 0;
7296 }
7297
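/* The helpers below copy the matched cls_flower keys (basic, mac, vlan,
 * ip addresses and ports) into an fd rule, marking any missing key as
 * an unused tuple.
 */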
7298 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7299                                     struct hclge_fd_rule *rule)
7300 {
7301         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7302                 struct flow_match_basic match;
7303                 u16 ethtype_key, ethtype_mask;
7304
7305                 flow_rule_match_basic(flow, &match);
7306                 ethtype_key = ntohs(match.key->n_proto);
7307                 ethtype_mask = ntohs(match.mask->n_proto);
7308
7309                 if (ethtype_key == ETH_P_ALL) {
7310                         ethtype_key = 0;
7311                         ethtype_mask = 0;
7312                 }
7313                 rule->tuples.ether_proto = ethtype_key;
7314                 rule->tuples_mask.ether_proto = ethtype_mask;
7315                 rule->tuples.ip_proto = match.key->ip_proto;
7316                 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7317         } else {
7318                 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7319                 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7320         }
7321 }
7322
7323 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7324                                   struct hclge_fd_rule *rule)
7325 {
7326         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7327                 struct flow_match_eth_addrs match;
7328
7329                 flow_rule_match_eth_addrs(flow, &match);
7330                 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7331                 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7332                 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7333                 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7334         } else {
7335                 rule->unused_tuple |= BIT(INNER_DST_MAC);
7336                 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7337         }
7338 }
7339
7340 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7341                                    struct hclge_fd_rule *rule)
7342 {
7343         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7344                 struct flow_match_vlan match;
7345
7346                 flow_rule_match_vlan(flow, &match);
7347                 rule->tuples.vlan_tag1 = match.key->vlan_id |
7348                                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7349                 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7350                                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7351         } else {
7352                 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7353         }
7354 }
7355
7356 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7357                                  struct hclge_fd_rule *rule)
7358 {
7359         u16 addr_type = 0;
7360
7361         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7362                 struct flow_match_control match;
7363
7364                 flow_rule_match_control(flow, &match);
7365                 addr_type = match.key->addr_type;
7366         }
7367
7368         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7369                 struct flow_match_ipv4_addrs match;
7370
7371                 flow_rule_match_ipv4_addrs(flow, &match);
7372                 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7373                 rule->tuples_mask.src_ip[IPV4_INDEX] =
7374                                                 be32_to_cpu(match.mask->src);
7375                 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7376                 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7377                                                 be32_to_cpu(match.mask->dst);
7378         } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7379                 struct flow_match_ipv6_addrs match;
7380
7381                 flow_rule_match_ipv6_addrs(flow, &match);
7382                 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7383                                   IPV6_SIZE);
7384                 be32_to_cpu_array(rule->tuples_mask.src_ip,
7385                                   match.mask->src.s6_addr32, IPV6_SIZE);
7386                 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7387                                   IPV6_SIZE);
7388                 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7389                                   match.mask->dst.s6_addr32, IPV6_SIZE);
7390         } else {
7391                 rule->unused_tuple |= BIT(INNER_SRC_IP);
7392                 rule->unused_tuple |= BIT(INNER_DST_IP);
7393         }
7394 }
7395
7396 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7397                                    struct hclge_fd_rule *rule)
7398 {
7399         if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7400                 struct flow_match_ports match;
7401
7402                 flow_rule_match_ports(flow, &match);
7403
7404                 rule->tuples.src_port = be16_to_cpu(match.key->src);
7405                 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7406                 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7407                 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7408         } else {
7409                 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7410                 rule->unused_tuple |= BIT(INNER_DST_PORT);
7411         }
7412 }
7413
7414 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7415                                   struct flow_cls_offload *cls_flower,
7416                                   struct hclge_fd_rule *rule)
7417 {
7418         struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7419         struct flow_dissector *dissector = flow->match.dissector;
7420
7421         if (dissector->used_keys &
7422             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7423               BIT(FLOW_DISSECTOR_KEY_BASIC) |
7424               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7425               BIT(FLOW_DISSECTOR_KEY_VLAN) |
7426               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7427               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7428               BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7429                 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7430                         dissector->used_keys);
7431                 return -EOPNOTSUPP;
7432         }
7433
7434         hclge_get_cls_key_basic(flow, rule);
7435         hclge_get_cls_key_mac(flow, rule);
7436         hclge_get_cls_key_vlan(flow, rule);
7437         hclge_get_cls_key_ip(flow, rule);
7438         hclge_get_cls_key_port(flow, rule);
7439
7440         return 0;
7441 }
7442
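/* Validate the tc and prio of a cls_flower rule: prio - 1 is used as
 * the fd rule location, so it must be unused and within the stage-1
 * rule space.
 */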
7443 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7444                                   struct flow_cls_offload *cls_flower, int tc)
7445 {
7446         u32 prio = cls_flower->common.prio;
7447
7448         if (tc < 0 || tc > hdev->tc_max) {
7449                 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7450                 return -EINVAL;
7451         }
7452
7453         if (prio == 0 ||
7454             prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7455                 dev_err(&hdev->pdev->dev,
7456                         "prio %u should be in range[1, %u]\n",
7457                         prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7458                 return -EINVAL;
7459         }
7460
7461         if (test_bit(prio - 1, hdev->fd_bmap)) {
7462                 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7463                 return -EINVAL;
7464         }
7465         return 0;
7466 }
7467
7468 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7469                                 struct flow_cls_offload *cls_flower,
7470                                 int tc)
7471 {
7472         struct hclge_vport *vport = hclge_get_vport(handle);
7473         struct hclge_dev *hdev = vport->back;
7474         struct hclge_fd_rule *rule;
7475         int ret;
7476
7477         ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7478         if (ret) {
7479                 dev_err(&hdev->pdev->dev,
7480                         "failed to check cls flower params, ret = %d\n", ret);
7481                 return ret;
7482         }
7483
7484         rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7485         if (!rule)
7486                 return -ENOMEM;
7487
7488         ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7489         if (ret) {
7490                 kfree(rule);
7491                 return ret;
7492         }
7493
7494         rule->action = HCLGE_FD_ACTION_SELECT_TC;
7495         rule->cls_flower.tc = tc;
7496         rule->location = cls_flower->common.prio - 1;
7497         rule->vf_id = 0;
7498         rule->cls_flower.cookie = cls_flower->cookie;
7499         rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7500
7501         ret = hclge_add_fd_entry_common(hdev, rule);
7502         if (ret)
7503                 kfree(rule);
7504
7505         return ret;
7506 }
7507
7508 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7509                                                    unsigned long cookie)
7510 {
7511         struct hclge_fd_rule *rule;
7512         struct hlist_node *node;
7513
7514         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7515                 if (rule->cls_flower.cookie == cookie)
7516                         return rule;
7517         }
7518
7519         return NULL;
7520 }
7521
7522 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7523                                 struct flow_cls_offload *cls_flower)
7524 {
7525         struct hclge_vport *vport = hclge_get_vport(handle);
7526         struct hclge_dev *hdev = vport->back;
7527         struct hclge_fd_rule *rule;
7528         int ret;
7529
7530         spin_lock_bh(&hdev->fd_rule_lock);
7531
7532         rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7533         if (!rule) {
7534                 spin_unlock_bh(&hdev->fd_rule_lock);
7535                 return -EINVAL;
7536         }
7537
7538         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7539                                    NULL, false);
7540         if (ret) {
7541                 spin_unlock_bh(&hdev->fd_rule_lock);
7542                 return ret;
7543         }
7544
7545         hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7546         spin_unlock_bh(&hdev->fd_rule_lock);
7547
7548         return 0;
7549 }
7550
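/* Push pending fd rule changes to hardware: rules in TO_ADD state are
 * written to the TCAM, rules in TO_DEL state are removed and freed. On
 * failure the changed flag is set again so the sync is retried later.
 */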
7551 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7552 {
7553         struct hclge_fd_rule *rule;
7554         struct hlist_node *node;
7555         int ret = 0;
7556
7557         if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7558                 return;
7559
7560         spin_lock_bh(&hdev->fd_rule_lock);
7561
7562         hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7563                 switch (rule->state) {
7564                 case HCLGE_FD_TO_ADD:
7565                         ret = hclge_fd_config_rule(hdev, rule);
7566                         if (ret)
7567                                 goto out;
7568                         rule->state = HCLGE_FD_ACTIVE;
7569                         break;
7570                 case HCLGE_FD_TO_DEL:
7571                         ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7572                                                    rule->location, NULL, false);
7573                         if (ret)
7574                                 goto out;
7575                         hclge_fd_dec_rule_cnt(hdev, rule->location);
7576                         hclge_fd_free_node(hdev, rule);
7577                         break;
7578                 default:
7579                         break;
7580                 }
7581         }
7582
7583 out:
7584         if (ret)
7585                 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7586
7587         spin_unlock_bh(&hdev->fd_rule_lock);
7588 }
7589
7590 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7591 {
7592         if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7593                 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7594
7595                 hclge_clear_fd_rules_in_list(hdev, clear_list);
7596         }
7597
7598         hclge_sync_fd_user_def_cfg(hdev, false);
7599
7600         hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7601 }
7602
7603 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7604 {
7605         struct hclge_vport *vport = hclge_get_vport(handle);
7606         struct hclge_dev *hdev = vport->back;
7607
7608         return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7609                hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7610 }
7611
7612 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7613 {
7614         struct hclge_vport *vport = hclge_get_vport(handle);
7615         struct hclge_dev *hdev = vport->back;
7616
7617         return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7618 }
7619
7620 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7621 {
7622         struct hclge_vport *vport = hclge_get_vport(handle);
7623         struct hclge_dev *hdev = vport->back;
7624
7625         return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7626 }
7627
7628 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7629 {
7630         struct hclge_vport *vport = hclge_get_vport(handle);
7631         struct hclge_dev *hdev = vport->back;
7632
7633         return hdev->rst_stats.hw_reset_done_cnt;
7634 }
7635
7636 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7637 {
7638         struct hclge_vport *vport = hclge_get_vport(handle);
7639         struct hclge_dev *hdev = vport->back;
7640
7641         hdev->fd_en = enable;
7642
7643         if (!enable)
7644                 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7645         else
7646                 hclge_restore_fd_entries(handle);
7647
7648         hclge_task_schedule(hdev, 0);
7649 }
7650
7651 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7652 {
7653         struct hclge_desc desc;
7654         struct hclge_config_mac_mode_cmd *req =
7655                 (struct hclge_config_mac_mode_cmd *)desc.data;
7656         u32 loop_en = 0;
7657         int ret;
7658
7659         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7660
7661         if (enable) {
7662                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7663                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7664                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7665                 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7666                 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7667                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7668                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7669                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7670                 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7671                 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7672         }
7673
7674         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7675
7676         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7677         if (ret)
7678                 dev_err(&hdev->pdev->dev,
7679                         "mac enable fail, ret =%d.\n", ret);
7680 }
7681
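/* Read the current mac vlan switch parameter for the given function id,
 * apply switch_param/param_mask and write the result back.
 */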
7682 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7683                                      u8 switch_param, u8 param_mask)
7684 {
7685         struct hclge_mac_vlan_switch_cmd *req;
7686         struct hclge_desc desc;
7687         u32 func_id;
7688         int ret;
7689
7690         func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7691         req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7692
7693         /* read current config parameter */
7694         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7695                                    true);
7696         req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7697         req->func_id = cpu_to_le32(func_id);
7698
7699         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7700         if (ret) {
7701                 dev_err(&hdev->pdev->dev,
7702                         "read mac vlan switch parameter fail, ret = %d\n", ret);
7703                 return ret;
7704         }
7705
7706         /* modify and write new config parameter */
7707         hclge_cmd_reuse_desc(&desc, false);
7708         req->switch_param = (req->switch_param & param_mask) | switch_param;
7709         req->param_mask = param_mask;
7710
7711         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7712         if (ret)
7713                 dev_err(&hdev->pdev->dev,
7714                         "set mac vlan switch parameter fail, ret = %d\n", ret);
7715         return ret;
7716 }
7717
7718 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7719                                        int link_ret)
7720 {
7721 #define HCLGE_PHY_LINK_STATUS_NUM  200
7722
7723         struct phy_device *phydev = hdev->hw.mac.phydev;
7724         int i = 0;
7725         int ret;
7726
7727         do {
7728                 ret = phy_read_status(phydev);
7729                 if (ret) {
7730                         dev_err(&hdev->pdev->dev,
7731                                 "phy update link status fail, ret = %d\n", ret);
7732                         return;
7733                 }
7734
7735                 if (phydev->link == link_ret)
7736                         break;
7737
7738                 msleep(HCLGE_LINK_STATUS_MS);
7739         } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7740 }
7741
7742 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7743 {
7744 #define HCLGE_MAC_LINK_STATUS_NUM  100
7745
7746         int link_status;
7747         int i = 0;
7748         int ret;
7749
7750         do {
7751                 ret = hclge_get_mac_link_status(hdev, &link_status);
7752                 if (ret)
7753                         return ret;
7754                 if (link_status == link_ret)
7755                         return 0;
7756
7757                 msleep(HCLGE_LINK_STATUS_MS);
7758         } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7759         return -EBUSY;
7760 }
7761
7762 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7763                                           bool is_phy)
7764 {
7765         int link_ret;
7766
7767         link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7768
7769         if (is_phy)
7770                 hclge_phy_link_status_wait(hdev, link_ret);
7771
7772         return hclge_mac_link_status_wait(hdev, link_ret);
7773 }
7774
7775 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7776 {
7777         struct hclge_config_mac_mode_cmd *req;
7778         struct hclge_desc desc;
7779         u32 loop_en;
7780         int ret;
7781
7782         req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7783         /* 1 Read out the MAC mode config at first */
7784         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7785         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7786         if (ret) {
7787                 dev_err(&hdev->pdev->dev,
7788                         "mac loopback get fail, ret =%d.\n", ret);
7789                 return ret;
7790         }
7791
7792         /* 2 Then setup the loopback flag */
7793         loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7794         hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7795
7796         req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7797
7798         /* 3 Config mac work mode with the loopback flag
7799          * and its original configuration parameters
7800          */
7801         hclge_cmd_reuse_desc(&desc, false);
7802         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7803         if (ret)
7804                 dev_err(&hdev->pdev->dev,
7805                         "mac loopback set fail, ret =%d.\n", ret);
7806         return ret;
7807 }
7808
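/* Enable or disable serdes/phy (common) loopback through a firmware
 * command, then poll until the firmware reports the operation done.
 */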
7809 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7810                                      enum hnae3_loop loop_mode)
7811 {
7812 #define HCLGE_COMMON_LB_RETRY_MS        10
7813 #define HCLGE_COMMON_LB_RETRY_NUM       100
7814
7815         struct hclge_common_lb_cmd *req;
7816         struct hclge_desc desc;
7817         int ret, i = 0;
7818         u8 loop_mode_b;
7819
7820         req = (struct hclge_common_lb_cmd *)desc.data;
7821         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7822
7823         switch (loop_mode) {
7824         case HNAE3_LOOP_SERIAL_SERDES:
7825                 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7826                 break;
7827         case HNAE3_LOOP_PARALLEL_SERDES:
7828                 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7829                 break;
7830         case HNAE3_LOOP_PHY:
7831                 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7832                 break;
7833         default:
7834                 dev_err(&hdev->pdev->dev,
7835                         "unsupported common loopback mode %d\n", loop_mode);
7836                 return -ENOTSUPP;
7837         }
7838
7839         if (en) {
7840                 req->enable = loop_mode_b;
7841                 req->mask = loop_mode_b;
7842         } else {
7843                 req->mask = loop_mode_b;
7844         }
7845
7846         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7847         if (ret) {
7848                 dev_err(&hdev->pdev->dev,
7849                         "common loopback set fail, ret = %d\n", ret);
7850                 return ret;
7851         }
7852
7853         do {
7854                 msleep(HCLGE_COMMON_LB_RETRY_MS);
7855                 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7856                                            true);
7857                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7858                 if (ret) {
7859                         dev_err(&hdev->pdev->dev,
7860                                 "common loopback get fail, ret = %d\n");
7861                         return ret;
7862                 }
7863         } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7864                  !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7865
7866         if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7867                 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7868                 return -EBUSY;
7869         } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7870                 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
7871                 return -EIO;
7872         }
7873         return ret;
7874 }
7875
7876 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7877                                      enum hnae3_loop loop_mode)
7878 {
7879         int ret;
7880
7881         ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7882         if (ret)
7883                 return ret;
7884
7885         hclge_cfg_mac_mode(hdev, en);
7886
7887         ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7888         if (ret)
7889                 dev_err(&hdev->pdev->dev,
7890                         "serdes loopback config mac mode timeout\n");
7891
7892         return ret;
7893 }
7894
7895 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7896                                      struct phy_device *phydev)
7897 {
7898         int ret;
7899
7900         if (!phydev->suspended) {
7901                 ret = phy_suspend(phydev);
7902                 if (ret)
7903                         return ret;
7904         }
7905
7906         ret = phy_resume(phydev);
7907         if (ret)
7908                 return ret;
7909
7910         return phy_loopback(phydev, true);
7911 }
7912
7913 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7914                                       struct phy_device *phydev)
7915 {
7916         int ret;
7917
7918         ret = phy_loopback(phydev, false);
7919         if (ret)
7920                 return ret;
7921
7922         return phy_suspend(phydev);
7923 }
7924
7925 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7926 {
7927         struct phy_device *phydev = hdev->hw.mac.phydev;
7928         int ret;
7929
7930         if (!phydev) {
7931                 if (hnae3_dev_phy_imp_supported(hdev))
7932                         return hclge_set_common_loopback(hdev, en,
7933                                                          HNAE3_LOOP_PHY);
7934                 return -ENOTSUPP;
7935         }
7936
7937         if (en)
7938                 ret = hclge_enable_phy_loopback(hdev, phydev);
7939         else
7940                 ret = hclge_disable_phy_loopback(hdev, phydev);
7941         if (ret) {
7942                 dev_err(&hdev->pdev->dev,
7943                         "set phy loopback fail, ret = %d\n", ret);
7944                 return ret;
7945         }
7946
7947         hclge_cfg_mac_mode(hdev, en);
7948
7949         ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7950         if (ret)
7951                 dev_err(&hdev->pdev->dev,
7952                         "phy loopback config mac mode timeout\n");
7953
7954         return ret;
7955 }
7956
7957 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7958                                      u16 stream_id, bool enable)
7959 {
7960         struct hclge_desc desc;
7961         struct hclge_cfg_com_tqp_queue_cmd *req =
7962                 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7963
7964         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7965         req->tqp_id = cpu_to_le16(tqp_id);
7966         req->stream_id = cpu_to_le16(stream_id);
7967         if (enable)
7968                 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7969
7970         return hclge_cmd_send(&hdev->hw, &desc, 1);
7971 }
7972
7973 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7974 {
7975         struct hclge_vport *vport = hclge_get_vport(handle);
7976         struct hclge_dev *hdev = vport->back;
7977         int ret;
7978         u16 i;
7979
7980         for (i = 0; i < handle->kinfo.num_tqps; i++) {
7981                 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7982                 if (ret)
7983                         return ret;
7984         }
7985         return 0;
7986 }
7987
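/* Configure the requested loopback mode (app, serdes or phy) for the
 * handle. On V2 and newer devices the HCLGE_SWITCH_ALW_LPBK_B switch
 * parameter is updated first (see the comment below), and the TQPs are
 * enabled or disabled to match.
 */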
7988 static int hclge_set_loopback(struct hnae3_handle *handle,
7989                               enum hnae3_loop loop_mode, bool en)
7990 {
7991         struct hclge_vport *vport = hclge_get_vport(handle);
7992         struct hclge_dev *hdev = vport->back;
7993         int ret;
7994
7995         /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7996          * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7997          * the same, the packets are looped back in the SSU. If SSU loopback
7998          * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7999          */
8000         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8001                 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8002
8003                 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8004                                                 HCLGE_SWITCH_ALW_LPBK_MASK);
8005                 if (ret)
8006                         return ret;
8007         }
8008
8009         switch (loop_mode) {
8010         case HNAE3_LOOP_APP:
8011                 ret = hclge_set_app_loopback(hdev, en);
8012                 break;
8013         case HNAE3_LOOP_SERIAL_SERDES:
8014         case HNAE3_LOOP_PARALLEL_SERDES:
8015                 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8016                 break;
8017         case HNAE3_LOOP_PHY:
8018                 ret = hclge_set_phy_loopback(hdev, en);
8019                 break;
8020         default:
8021                 ret = -ENOTSUPP;
8022                 dev_err(&hdev->pdev->dev,
8023                         "loop_mode %d is not supported\n", loop_mode);
8024                 break;
8025         }
8026
8027         if (ret)
8028                 return ret;
8029
8030         ret = hclge_tqp_enable(handle, en);
8031         if (ret)
8032                 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8033                         en ? "enable" : "disable", ret);
8034
8035         return ret;
8036 }
8037
8038 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8039 {
8040         int ret;
8041
8042         ret = hclge_set_app_loopback(hdev, false);
8043         if (ret)
8044                 return ret;
8045
8046         ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8047         if (ret)
8048                 return ret;
8049
8050         return hclge_cfg_common_loopback(hdev, false,
8051                                          HNAE3_LOOP_PARALLEL_SERDES);
8052 }
8053
8054 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8055 {
8056         struct hclge_vport *vport = hclge_get_vport(handle);
8057         struct hnae3_knic_private_info *kinfo;
8058         struct hnae3_queue *queue;
8059         struct hclge_tqp *tqp;
8060         int i;
8061
8062         kinfo = &vport->nic.kinfo;
8063         for (i = 0; i < kinfo->num_tqps; i++) {
8064                 queue = handle->kinfo.tqp[i];
8065                 tqp = container_of(queue, struct hclge_tqp, q);
8066                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8067         }
8068 }
8069
8070 static void hclge_flush_link_update(struct hclge_dev *hdev)
8071 {
8072 #define HCLGE_FLUSH_LINK_TIMEOUT        100000
8073
8074         unsigned long last = hdev->serv_processed_cnt;
8075         int i = 0;
8076
8077         while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8078                i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8079                last == hdev->serv_processed_cnt)
8080                 usleep_range(1, 1);
8081 }
8082
8083 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8084 {
8085         struct hclge_vport *vport = hclge_get_vport(handle);
8086         struct hclge_dev *hdev = vport->back;
8087
8088         if (enable) {
8089                 hclge_task_schedule(hdev, 0);
8090         } else {
8091                 /* Set the DOWN flag here to disable link updating */
8092                 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8093
8094                 /* flush memory to make sure DOWN is seen by service task */
8095                 smp_mb__before_atomic();
8096                 hclge_flush_link_update(hdev);
8097         }
8098 }
8099
8100 static int hclge_ae_start(struct hnae3_handle *handle)
8101 {
8102         struct hclge_vport *vport = hclge_get_vport(handle);
8103         struct hclge_dev *hdev = vport->back;
8104
8105         /* mac enable */
8106         hclge_cfg_mac_mode(hdev, true);
8107         clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8108         hdev->hw.mac.link = 0;
8109
8110         /* reset tqp stats */
8111         hclge_reset_tqp_stats(handle);
8112
8113         hclge_mac_start_phy(hdev);
8114
8115         return 0;
8116 }
8117
8118 static void hclge_ae_stop(struct hnae3_handle *handle)
8119 {
8120         struct hclge_vport *vport = hclge_get_vport(handle);
8121         struct hclge_dev *hdev = vport->back;
8122
8123         set_bit(HCLGE_STATE_DOWN, &hdev->state);
8124         spin_lock_bh(&hdev->fd_rule_lock);
8125         hclge_clear_arfs_rules(hdev);
8126         spin_unlock_bh(&hdev->fd_rule_lock);
8127
8128         /* If it is not a PF reset, the firmware will disable the MAC,
8129          * so we only need to stop the phy here.
8130          */
8131         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8132             hdev->reset_type != HNAE3_FUNC_RESET) {
8133                 hclge_mac_stop_phy(hdev);
8134                 hclge_update_link_status(hdev);
8135                 return;
8136         }
8137
8138         hclge_reset_tqp(handle);
8139
8140         hclge_config_mac_tnl_int(hdev, false);
8141
8142         /* Mac disable */
8143         hclge_cfg_mac_mode(hdev, false);
8144
8145         hclge_mac_stop_phy(hdev);
8146
8147         /* reset tqp stats */
8148         hclge_reset_tqp_stats(handle);
8149         hclge_update_link_status(hdev);
8150 }
8151
8152 int hclge_vport_start(struct hclge_vport *vport)
8153 {
8154         struct hclge_dev *hdev = vport->back;
8155
8156         set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8157         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8158         vport->last_active_jiffies = jiffies;
8159
8160         if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8161                 if (vport->vport_id) {
8162                         hclge_restore_mac_table_common(vport);
8163                         hclge_restore_vport_vlan_table(vport);
8164                 } else {
8165                         hclge_restore_hw_table(hdev);
8166                 }
8167         }
8168
8169         clear_bit(vport->vport_id, hdev->vport_config_block);
8170
8171         return 0;
8172 }
8173
8174 void hclge_vport_stop(struct hclge_vport *vport)
8175 {
8176         clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8177 }
8178
8179 static int hclge_client_start(struct hnae3_handle *handle)
8180 {
8181         struct hclge_vport *vport = hclge_get_vport(handle);
8182
8183         return hclge_vport_start(vport);
8184 }
8185
8186 static void hclge_client_stop(struct hnae3_handle *handle)
8187 {
8188         struct hclge_vport *vport = hclge_get_vport(handle);
8189
8190         hclge_vport_stop(vport);
8191 }
8192
8193 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8194                                          u16 cmdq_resp, u8  resp_code,
8195                                          enum hclge_mac_vlan_tbl_opcode op)
8196 {
8197         struct hclge_dev *hdev = vport->back;
8198
8199         if (cmdq_resp) {
8200                 dev_err(&hdev->pdev->dev,
8201                         "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8202                         cmdq_resp);
8203                 return -EIO;
8204         }
8205
8206         if (op == HCLGE_MAC_VLAN_ADD) {
8207                 if (!resp_code || resp_code == 1)
8208                         return 0;
8209                 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8210                          resp_code == HCLGE_ADD_MC_OVERFLOW)
8211                         return -ENOSPC;
8212
8213                 dev_err(&hdev->pdev->dev,
8214                         "add mac addr failed for undefined, code=%u.\n",
8215                         resp_code);
8216                 return -EIO;
8217         } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8218                 if (!resp_code) {
8219                         return 0;
8220                 } else if (resp_code == 1) {
8221                         dev_dbg(&hdev->pdev->dev,
8222                                 "remove mac addr failed for miss.\n");
8223                         return -ENOENT;
8224                 }
8225
8226                 dev_err(&hdev->pdev->dev,
8227                         "remove mac addr failed for undefined, code=%u.\n",
8228                         resp_code);
8229                 return -EIO;
8230         } else if (op == HCLGE_MAC_VLAN_LKUP) {
8231                 if (!resp_code) {
8232                         return 0;
8233                 } else if (resp_code == 1) {
8234                         dev_dbg(&hdev->pdev->dev,
8235                                 "lookup mac addr failed for miss.\n");
8236                         return -ENOENT;
8237                 }
8238
8239                 dev_err(&hdev->pdev->dev,
8240                         "lookup mac addr failed for undefined, code=%u.\n",
8241                         resp_code);
8242                 return -EIO;
8243         }
8244
8245         dev_err(&hdev->pdev->dev,
8246                 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8247
8248         return -EINVAL;
8249 }
8250
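/* Set or clear the bit for vfid in a mac vlan table descriptor chain:
 * vfids 0..191 are kept in desc[1], higher vfids in desc[2].
 */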
8251 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8252 {
8253 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8254
8255         unsigned int word_num;
8256         unsigned int bit_num;
8257
8258         if (vfid > 255 || vfid < 0)
8259                 return -EIO;
8260
8261         if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8262                 word_num = vfid / 32;
8263                 bit_num  = vfid % 32;
8264                 if (clr)
8265                         desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8266                 else
8267                         desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8268         } else {
8269                 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8270                 bit_num  = vfid % 32;
8271                 if (clr)
8272                         desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8273                 else
8274                         desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8275         }
8276
8277         return 0;
8278 }
8279
8280 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8281 {
8282 #define HCLGE_DESC_NUMBER 3
8283 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8284         int i, j;
8285
8286         for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8287                 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8288                         if (desc[i].data[j])
8289                                 return false;
8290
8291         return true;
8292 }
8293
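/* Pack a mac address into the hi32/lo16 fields of a mac vlan table
 * entry command and mark the entry as multicast when is_mc is set.
 */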
8294 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8295                                    const u8 *addr, bool is_mc)
8296 {
8297         const unsigned char *mac_addr = addr;
8298         u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8299                        (mac_addr[0]) | (mac_addr[1] << 8);
8300         u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8301
8302         hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8303         if (is_mc) {
8304                 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8305                 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8306         }
8307
8308         new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8309         new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8310 }
8311
8312 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8313                                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8314 {
8315         struct hclge_dev *hdev = vport->back;
8316         struct hclge_desc desc;
8317         u8 resp_code;
8318         u16 retval;
8319         int ret;
8320
8321         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8322
8323         memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8324
8325         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8326         if (ret) {
8327                 dev_err(&hdev->pdev->dev,
8328                         "del mac addr failed for cmd_send, ret =%d.\n",
8329                         ret);
8330                 return ret;
8331         }
8332         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8333         retval = le16_to_cpu(desc.retval);
8334
8335         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8336                                              HCLGE_MAC_VLAN_REMOVE);
8337 }
8338
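/* Look up @req in the mac_vlan table. A multicast entry spans three
 * descriptors (its VF bitmap lives in desc[1] and desc[2], see
 * hclge_update_desc_vfid()), so multicast lookups chain three descriptors
 * while unicast lookups use a single one.
 */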
8339 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8340                                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8341                                      struct hclge_desc *desc,
8342                                      bool is_mc)
8343 {
8344         struct hclge_dev *hdev = vport->back;
8345         u8 resp_code;
8346         u16 retval;
8347         int ret;
8348
8349         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8350         if (is_mc) {
8351                 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8352                 memcpy(desc[0].data,
8353                        req,
8354                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8355                 hclge_cmd_setup_basic_desc(&desc[1],
8356                                            HCLGE_OPC_MAC_VLAN_ADD,
8357                                            true);
8358                 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8359                 hclge_cmd_setup_basic_desc(&desc[2],
8360                                            HCLGE_OPC_MAC_VLAN_ADD,
8361                                            true);
8362                 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8363         } else {
8364                 memcpy(desc[0].data,
8365                        req,
8366                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8367                 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8368         }
8369         if (ret) {
8370                 dev_err(&hdev->pdev->dev,
8371                         "lookup mac addr failed for cmd_send, ret =%d.\n",
8372                         ret);
8373                 return ret;
8374         }
8375         resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8376         retval = le16_to_cpu(desc[0].retval);
8377
8378         return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8379                                              HCLGE_MAC_VLAN_LKUP);
8380 }
8381
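/* Add @req to the mac_vlan table. For unicast a single descriptor is built
 * locally; for multicast the descriptors returned by the previous lookup are
 * reused (so the existing VF bitmap is preserved) and sent as a chain of
 * three.
 */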
8382 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8383                                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8384                                   struct hclge_desc *mc_desc)
8385 {
8386         struct hclge_dev *hdev = vport->back;
8387         int cfg_status;
8388         u8 resp_code;
8389         u16 retval;
8390         int ret;
8391
8392         if (!mc_desc) {
8393                 struct hclge_desc desc;
8394
8395                 hclge_cmd_setup_basic_desc(&desc,
8396                                            HCLGE_OPC_MAC_VLAN_ADD,
8397                                            false);
8398                 memcpy(desc.data, req,
8399                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8400                 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8401                 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8402                 retval = le16_to_cpu(desc.retval);
8403
8404                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8405                                                            resp_code,
8406                                                            HCLGE_MAC_VLAN_ADD);
8407         } else {
8408                 hclge_cmd_reuse_desc(&mc_desc[0], false);
8409                 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8410                 hclge_cmd_reuse_desc(&mc_desc[1], false);
8411                 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8412                 hclge_cmd_reuse_desc(&mc_desc[2], false);
8413                 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8414                 memcpy(mc_desc[0].data, req,
8415                        sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8416                 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8417                 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8418                 retval = le16_to_cpu(mc_desc[0].retval);
8419
8420                 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8421                                                            resp_code,
8422                                                            HCLGE_MAC_VLAN_ADD);
8423         }
8424
8425         if (ret) {
8426                 dev_err(&hdev->pdev->dev,
8427                         "add mac addr failed for cmd_send, ret =%d.\n",
8428                         ret);
8429                 return ret;
8430         }
8431
8432         return cfg_status;
8433 }
8434
8435 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8436                                u16 *allocated_size)
8437 {
8438         struct hclge_umv_spc_alc_cmd *req;
8439         struct hclge_desc desc;
8440         int ret;
8441
8442         req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8443         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8444
8445         req->space_size = cpu_to_le32(space_size);
8446
8447         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8448         if (ret) {
8449                 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8450                         ret);
8451                 return ret;
8452         }
8453
8454         *allocated_size = le32_to_cpu(desc.data[1]);
8455
8456         return 0;
8457 }
8458
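/* Request the unicast mac_vlan (UMV) table space from firmware and split it:
 * each of the (num_alloc_vport + 1) users gets a private quota of
 * max_umv_size / (num_alloc_vport + 1) entries, while the shared pool is
 * sized as one quota plus the division remainder.
 */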
8459 static int hclge_init_umv_space(struct hclge_dev *hdev)
8460 {
8461         u16 allocated_size = 0;
8462         int ret;
8463
8464         ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8465         if (ret)
8466                 return ret;
8467
8468         if (allocated_size < hdev->wanted_umv_size)
8469                 dev_warn(&hdev->pdev->dev,
8470                          "failed to alloc umv space, want %u, get %u\n",
8471                          hdev->wanted_umv_size, allocated_size);
8472
8473         hdev->max_umv_size = allocated_size;
8474         hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8475         hdev->share_umv_size = hdev->priv_umv_size +
8476                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8477
8478         return 0;
8479 }
8480
8481 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8482 {
8483         struct hclge_vport *vport;
8484         int i;
8485
8486         for (i = 0; i < hdev->num_alloc_vport; i++) {
8487                 vport = &hdev->vport[i];
8488                 vport->used_umv_num = 0;
8489         }
8490
8491         mutex_lock(&hdev->vport_lock);
8492         hdev->share_umv_size = hdev->priv_umv_size +
8493                         hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8494         mutex_unlock(&hdev->vport_lock);
8495 }
8496
8497 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8498 {
8499         struct hclge_dev *hdev = vport->back;
8500         bool is_full;
8501
8502         if (need_lock)
8503                 mutex_lock(&hdev->vport_lock);
8504
8505         is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8506                    hdev->share_umv_size == 0);
8507
8508         if (need_lock)
8509                 mutex_unlock(&hdev->vport_lock);
8510
8511         return is_full;
8512 }
8513
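/* Update the UMV usage counters for @vport. Entries beyond the vport's
 * private quota are accounted against the shared pool: freeing such an entry
 * returns one slot to share_umv_size, while adding beyond the quota consumes
 * one from it.
 */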
8514 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8515 {
8516         struct hclge_dev *hdev = vport->back;
8517
8518         if (is_free) {
8519                 if (vport->used_umv_num > hdev->priv_umv_size)
8520                         hdev->share_umv_size++;
8521
8522                 if (vport->used_umv_num > 0)
8523                         vport->used_umv_num--;
8524         } else {
8525                 if (vport->used_umv_num >= hdev->priv_umv_size &&
8526                     hdev->share_umv_size > 0)
8527                         hdev->share_umv_size--;
8528                 vport->used_umv_num++;
8529         }
8530 }
8531
8532 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8533                                                   const u8 *mac_addr)
8534 {
8535         struct hclge_mac_node *mac_node, *tmp;
8536
8537         list_for_each_entry_safe(mac_node, tmp, list, node)
8538                 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8539                         return mac_node;
8540
8541         return NULL;
8542 }
8543
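/* Merge a new request @state into an existing mac node:
 *   TO_ADD on a TO_DEL node  -> ACTIVE (the pending delete is cancelled)
 *   TO_DEL on a TO_ADD node  -> the node is freed (it was never programmed)
 *   TO_DEL on any other node -> TO_DEL
 *   ACTIVE on a TO_ADD node  -> ACTIVE (programming succeeded)
 */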
8544 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8545                                   enum HCLGE_MAC_NODE_STATE state)
8546 {
8547         switch (state) {
8548         /* from set_rx_mode or tmp_add_list */
8549         case HCLGE_MAC_TO_ADD:
8550                 if (mac_node->state == HCLGE_MAC_TO_DEL)
8551                         mac_node->state = HCLGE_MAC_ACTIVE;
8552                 break;
8553         /* only from set_rx_mode */
8554         case HCLGE_MAC_TO_DEL:
8555                 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8556                         list_del(&mac_node->node);
8557                         kfree(mac_node);
8558                 } else {
8559                         mac_node->state = HCLGE_MAC_TO_DEL;
8560                 }
8561                 break;
8562         /* only from tmp_add_list, the mac_node->state won't be
8563          * ACTIVE.
8564          */
8565         case HCLGE_MAC_ACTIVE:
8566                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8567                         mac_node->state = HCLGE_MAC_ACTIVE;
8568
8569                 break;
8570         }
8571 }
8572
8573 int hclge_update_mac_list(struct hclge_vport *vport,
8574                           enum HCLGE_MAC_NODE_STATE state,
8575                           enum HCLGE_MAC_ADDR_TYPE mac_type,
8576                           const unsigned char *addr)
8577 {
8578         struct hclge_dev *hdev = vport->back;
8579         struct hclge_mac_node *mac_node;
8580         struct list_head *list;
8581
8582         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8583                 &vport->uc_mac_list : &vport->mc_mac_list;
8584
8585         spin_lock_bh(&vport->mac_list_lock);
8586
8587         /* if the mac addr is already in the mac list, there is no need to
8588          * add a new node; just update the existing node's state: convert it
8589          * to a new state, remove it, or leave it unchanged.
8590          */
8591         mac_node = hclge_find_mac_node(list, addr);
8592         if (mac_node) {
8593                 hclge_update_mac_node(mac_node, state);
8594                 spin_unlock_bh(&vport->mac_list_lock);
8595                 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8596                 return 0;
8597         }
8598
8599         /* if this address was never added, there is nothing to delete */
8600         if (state == HCLGE_MAC_TO_DEL) {
8601                 spin_unlock_bh(&vport->mac_list_lock);
8602                 dev_err(&hdev->pdev->dev,
8603                         "failed to delete address %pM from mac list\n",
8604                         addr);
8605                 return -ENOENT;
8606         }
8607
8608         mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8609         if (!mac_node) {
8610                 spin_unlock_bh(&vport->mac_list_lock);
8611                 return -ENOMEM;
8612         }
8613
8614         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8615
8616         mac_node->state = state;
8617         ether_addr_copy(mac_node->mac_addr, addr);
8618         list_add_tail(&mac_node->node, list);
8619
8620         spin_unlock_bh(&vport->mac_list_lock);
8621
8622         return 0;
8623 }
8624
8625 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8626                              const unsigned char *addr)
8627 {
8628         struct hclge_vport *vport = hclge_get_vport(handle);
8629
8630         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8631                                      addr);
8632 }
8633
8634 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8635                              const unsigned char *addr)
8636 {
8637         struct hclge_dev *hdev = vport->back;
8638         struct hclge_mac_vlan_tbl_entry_cmd req;
8639         struct hclge_desc desc;
8640         u16 egress_port = 0;
8641         int ret;
8642
8643         /* mac addr check */
8644         if (is_zero_ether_addr(addr) ||
8645             is_broadcast_ether_addr(addr) ||
8646             is_multicast_ether_addr(addr)) {
8647                 dev_err(&hdev->pdev->dev,
8648                         "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8649                          addr, is_zero_ether_addr(addr),
8650                          is_broadcast_ether_addr(addr),
8651                          is_multicast_ether_addr(addr));
8652                 return -EINVAL;
8653         }
8654
8655         memset(&req, 0, sizeof(req));
8656
8657         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8658                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8659
8660         req.egress_port = cpu_to_le16(egress_port);
8661
8662         hclge_prepare_mac_addr(&req, addr, false);
8663
8664         /* Look up the mac address in the mac_vlan table, and add
8665          * it if the entry does not exist. Duplicate unicast entries
8666          * are not allowed in the mac vlan table.
8667          */
8668         ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8669         if (ret == -ENOENT) {
8670                 mutex_lock(&hdev->vport_lock);
8671                 if (!hclge_is_umv_space_full(vport, false)) {
8672                         ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8673                         if (!ret)
8674                                 hclge_update_umv_space(vport, false);
8675                         mutex_unlock(&hdev->vport_lock);
8676                         return ret;
8677                 }
8678                 mutex_unlock(&hdev->vport_lock);
8679
8680                 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8681                         dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8682                                 hdev->priv_umv_size);
8683
8684                 return -ENOSPC;
8685         }
8686
8687         /* check if we just hit the duplicate */
8688         if (!ret) {
8689                 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8690                          vport->vport_id, addr);
8691                 return 0;
8692         }
8693
8694         dev_err(&hdev->pdev->dev,
8695                 "PF failed to add unicast entry(%pM) in the MAC table\n",
8696                 addr);
8697
8698         return ret;
8699 }
8700
8701 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8702                             const unsigned char *addr)
8703 {
8704         struct hclge_vport *vport = hclge_get_vport(handle);
8705
8706         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8707                                      addr);
8708 }
8709
8710 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8711                             const unsigned char *addr)
8712 {
8713         struct hclge_dev *hdev = vport->back;
8714         struct hclge_mac_vlan_tbl_entry_cmd req;
8715         int ret;
8716
8717         /* mac addr check */
8718         if (is_zero_ether_addr(addr) ||
8719             is_broadcast_ether_addr(addr) ||
8720             is_multicast_ether_addr(addr)) {
8721                 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8722                         addr);
8723                 return -EINVAL;
8724         }
8725
8726         memset(&req, 0, sizeof(req));
8727         hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8728         hclge_prepare_mac_addr(&req, addr, false);
8729         ret = hclge_remove_mac_vlan_tbl(vport, &req);
8730         if (!ret) {
8731                 mutex_lock(&hdev->vport_lock);
8732                 hclge_update_umv_space(vport, true);
8733                 mutex_unlock(&hdev->vport_lock);
8734         } else if (ret == -ENOENT) {
8735                 ret = 0;
8736         }
8737
8738         return ret;
8739 }
8740
8741 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8742                              const unsigned char *addr)
8743 {
8744         struct hclge_vport *vport = hclge_get_vport(handle);
8745
8746         return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8747                                      addr);
8748 }
8749
8750 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8751                              const unsigned char *addr)
8752 {
8753         struct hclge_dev *hdev = vport->back;
8754         struct hclge_mac_vlan_tbl_entry_cmd req;
8755         struct hclge_desc desc[3];
8756         int status;
8757
8758         /* mac addr check */
8759         if (!is_multicast_ether_addr(addr)) {
8760                 dev_err(&hdev->pdev->dev,
8761                         "Add mc mac err! invalid mac:%pM.\n",
8762                          addr);
8763                 return -EINVAL;
8764         }
8765         memset(&req, 0, sizeof(req));
8766         hclge_prepare_mac_addr(&req, addr, true);
8767         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8768         if (status) {
8769                 /* This mac addr does not exist, add a new entry for it */
8770                 memset(desc[0].data, 0, sizeof(desc[0].data));
8771                 memset(desc[1].data, 0, sizeof(desc[1].data));
8772                 memset(desc[2].data, 0, sizeof(desc[2].data));
8773         }
8774         status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8775         if (status)
8776                 return status;
8777         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8778         /* if the table has already overflowed, do not print the error each time */
8779         if (status == -ENOSPC &&
8780             !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8781                 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8782
8783         return status;
8784 }
8785
8786 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8787                             const unsigned char *addr)
8788 {
8789         struct hclge_vport *vport = hclge_get_vport(handle);
8790
8791         return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8792                                      addr);
8793 }
8794
8795 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8796                             const unsigned char *addr)
8797 {
8798         struct hclge_dev *hdev = vport->back;
8799         struct hclge_mac_vlan_tbl_entry_cmd req;
8800         int status;
8801         struct hclge_desc desc[3];
8802
8803         /* mac addr check */
8804         if (!is_multicast_ether_addr(addr)) {
8805                 dev_dbg(&hdev->pdev->dev,
8806                         "Remove mc mac err! invalid mac:%pM.\n",
8807                          addr);
8808                 return -EINVAL;
8809         }
8810
8811         memset(&req, 0, sizeof(req));
8812         hclge_prepare_mac_addr(&req, addr, true);
8813         status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8814         if (!status) {
8815                 /* This mac addr exists, remove this handle's VFID from it */
8816                 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8817                 if (status)
8818                         return status;
8819
8820                 if (hclge_is_all_function_id_zero(desc))
8821                         /* All the vfids are zero, so delete this entry */
8822                         status = hclge_remove_mac_vlan_tbl(vport, &req);
8823                 else
8824                         /* Not all the vfids are zero, just update the vfid bitmap */
8825                         status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8826         } else if (status == -ENOENT) {
8827                 status = 0;
8828         }
8829
8830         return status;
8831 }
8832
8833 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8834                                       struct list_head *list,
8835                                       int (*sync)(struct hclge_vport *,
8836                                                   const unsigned char *))
8837 {
8838         struct hclge_mac_node *mac_node, *tmp;
8839         int ret;
8840
8841         list_for_each_entry_safe(mac_node, tmp, list, node) {
8842                 ret = sync(vport, mac_node->mac_addr);
8843                 if (!ret) {
8844                         mac_node->state = HCLGE_MAC_ACTIVE;
8845                 } else {
8846                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8847                                 &vport->state);
8848                         break;
8849                 }
8850         }
8851 }
8852
8853 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8854                                         struct list_head *list,
8855                                         int (*unsync)(struct hclge_vport *,
8856                                                       const unsigned char *))
8857 {
8858         struct hclge_mac_node *mac_node, *tmp;
8859         int ret;
8860
8861         list_for_each_entry_safe(mac_node, tmp, list, node) {
8862                 ret = unsync(vport, mac_node->mac_addr);
8863                 if (!ret || ret == -ENOENT) {
8864                         list_del(&mac_node->node);
8865                         kfree(mac_node);
8866                 } else {
8867                         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8868                                 &vport->state);
8869                         break;
8870                 }
8871         }
8872 }
8873
8874 static bool hclge_sync_from_add_list(struct list_head *add_list,
8875                                      struct list_head *mac_list)
8876 {
8877         struct hclge_mac_node *mac_node, *tmp, *new_node;
8878         bool all_added = true;
8879
8880         list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8881                 if (mac_node->state == HCLGE_MAC_TO_ADD)
8882                         all_added = false;
8883
8884                 /* if the mac address from tmp_add_list is not in the
8885                  * uc/mc_mac_list, a TO_DEL request was received during the
8886                  * time window of adding the mac address into the mac
8887                  * table. If the mac_node state is ACTIVE, change it to
8888                  * TO_DEL so it will be removed next time. Otherwise it
8889                  * must be TO_ADD, meaning this address has not been added
8890                  * into the mac table yet, so just remove the mac node.
8891                  */
8892                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8893                 if (new_node) {
8894                         hclge_update_mac_node(new_node, mac_node->state);
8895                         list_del(&mac_node->node);
8896                         kfree(mac_node);
8897                 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8898                         mac_node->state = HCLGE_MAC_TO_DEL;
8899                         list_move_tail(&mac_node->node, mac_list);
8900                 } else {
8901                         list_del(&mac_node->node);
8902                         kfree(mac_node);
8903                 }
8904         }
8905
8906         return all_added;
8907 }
8908
8909 static void hclge_sync_from_del_list(struct list_head *del_list,
8910                                      struct list_head *mac_list)
8911 {
8912         struct hclge_mac_node *mac_node, *tmp, *new_node;
8913
8914         list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8915                 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8916                 if (new_node) {
8917                         /* If the mac addr exists in the mac list, a new
8918                          * TO_ADD request was received during the time window
8919                          * of configuring the mac address. Since the mac node
8920                          * state is TO_ADD and the address is still in the
8921                          * hardware (because the delete failed), we just need
8922                          * to change the mac node state to ACTIVE.
8923                          */
8924                         new_node->state = HCLGE_MAC_ACTIVE;
8925                         list_del(&mac_node->node);
8926                         kfree(mac_node);
8927                 } else {
8928                         list_move_tail(&mac_node->node, mac_list);
8929                 }
8930         }
8931 }
8932
8933 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8934                                         enum HCLGE_MAC_ADDR_TYPE mac_type,
8935                                         bool is_all_added)
8936 {
8937         if (mac_type == HCLGE_MAC_ADDR_UC) {
8938                 if (is_all_added)
8939                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8940                 else
8941                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8942         } else {
8943                 if (is_all_added)
8944                         vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8945                 else
8946                         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8947         }
8948 }
8949
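/* Synchronize the vport's software mac list with the hardware table in three
 * steps: snapshot the pending TO_ADD/TO_DEL nodes into temporary lists under
 * the mac_list_lock, issue the hardware deletes and adds without the lock
 * held, then merge the results back and update the overflow promisc flags.
 */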
8950 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8951                                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8952 {
8953         struct hclge_mac_node *mac_node, *tmp, *new_node;
8954         struct list_head tmp_add_list, tmp_del_list;
8955         struct list_head *list;
8956         bool all_added;
8957
8958         INIT_LIST_HEAD(&tmp_add_list);
8959         INIT_LIST_HEAD(&tmp_del_list);
8960
8961         /* move the mac addrs to the tmp_add_list and tmp_del_list, so
8962          * we can add/delete these mac addrs outside the spin lock
8963          */
8964         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8965                 &vport->uc_mac_list : &vport->mc_mac_list;
8966
8967         spin_lock_bh(&vport->mac_list_lock);
8968
8969         list_for_each_entry_safe(mac_node, tmp, list, node) {
8970                 switch (mac_node->state) {
8971                 case HCLGE_MAC_TO_DEL:
8972                         list_move_tail(&mac_node->node, &tmp_del_list);
8973                         break;
8974                 case HCLGE_MAC_TO_ADD:
8975                         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8976                         if (!new_node)
8977                                 goto stop_traverse;
8978                         ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8979                         new_node->state = mac_node->state;
8980                         list_add_tail(&new_node->node, &tmp_add_list);
8981                         break;
8982                 default:
8983                         break;
8984                 }
8985         }
8986
8987 stop_traverse:
8988         spin_unlock_bh(&vport->mac_list_lock);
8989
8990         /* delete first, in order to get max mac table space for adding */
8991         if (mac_type == HCLGE_MAC_ADDR_UC) {
8992                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8993                                             hclge_rm_uc_addr_common);
8994                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8995                                           hclge_add_uc_addr_common);
8996         } else {
8997                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8998                                             hclge_rm_mc_addr_common);
8999                 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9000                                           hclge_add_mc_addr_common);
9001         }
9002
9003         /* if adding/deleting some mac addresses failed, move them back to
9004          * the mac_list and retry next time.
9005          */
9006         spin_lock_bh(&vport->mac_list_lock);
9007
9008         hclge_sync_from_del_list(&tmp_del_list, list);
9009         all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9010
9011         spin_unlock_bh(&vport->mac_list_lock);
9012
9013         hclge_update_overflow_flags(vport, mac_type, all_added);
9014 }
9015
9016 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9017 {
9018         struct hclge_dev *hdev = vport->back;
9019
9020         if (test_bit(vport->vport_id, hdev->vport_config_block))
9021                 return false;
9022
9023         if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9024                 return true;
9025
9026         return false;
9027 }
9028
9029 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9030 {
9031         int i;
9032
9033         for (i = 0; i < hdev->num_alloc_vport; i++) {
9034                 struct hclge_vport *vport = &hdev->vport[i];
9035
9036                 if (!hclge_need_sync_mac_table(vport))
9037                         continue;
9038
9039                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9040                 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9041         }
9042 }
9043
9044 static void hclge_build_del_list(struct list_head *list,
9045                                  bool is_del_list,
9046                                  struct list_head *tmp_del_list)
9047 {
9048         struct hclge_mac_node *mac_cfg, *tmp;
9049
9050         list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9051                 switch (mac_cfg->state) {
9052                 case HCLGE_MAC_TO_DEL:
9053                 case HCLGE_MAC_ACTIVE:
9054                         list_move_tail(&mac_cfg->node, tmp_del_list);
9055                         break;
9056                 case HCLGE_MAC_TO_ADD:
9057                         if (is_del_list) {
9058                                 list_del(&mac_cfg->node);
9059                                 kfree(mac_cfg);
9060                         }
9061                         break;
9062                 }
9063         }
9064 }
9065
9066 static void hclge_unsync_del_list(struct hclge_vport *vport,
9067                                   int (*unsync)(struct hclge_vport *vport,
9068                                                 const unsigned char *addr),
9069                                   bool is_del_list,
9070                                   struct list_head *tmp_del_list)
9071 {
9072         struct hclge_mac_node *mac_cfg, *tmp;
9073         int ret;
9074
9075         list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9076                 ret = unsync(vport, mac_cfg->mac_addr);
9077                 if (!ret || ret == -ENOENT) {
9078                         /* clear all mac addrs from hardware, but keep these
9079                          * mac addrs in the mac list, and restore them after
9080                          * the vf reset has finished.
9081                          */
9082                         if (!is_del_list &&
9083                             mac_cfg->state == HCLGE_MAC_ACTIVE) {
9084                                 mac_cfg->state = HCLGE_MAC_TO_ADD;
9085                         } else {
9086                                 list_del(&mac_cfg->node);
9087                                 kfree(mac_cfg);
9088                         }
9089                 } else if (is_del_list) {
9090                         mac_cfg->state = HCLGE_MAC_TO_DEL;
9091                 }
9092         }
9093 }
9094
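/* Remove all mac table entries of @vport from hardware. When @is_del_list is
 * false (e.g. around a VF reset) the addresses stay in the software list in
 * TO_ADD state so they can be restored later; otherwise the list entries are
 * dropped as well.
 */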
9095 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9096                                   enum HCLGE_MAC_ADDR_TYPE mac_type)
9097 {
9098         int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9099         struct hclge_dev *hdev = vport->back;
9100         struct list_head tmp_del_list, *list;
9101
9102         if (mac_type == HCLGE_MAC_ADDR_UC) {
9103                 list = &vport->uc_mac_list;
9104                 unsync = hclge_rm_uc_addr_common;
9105         } else {
9106                 list = &vport->mc_mac_list;
9107                 unsync = hclge_rm_mc_addr_common;
9108         }
9109
9110         INIT_LIST_HEAD(&tmp_del_list);
9111
9112         if (!is_del_list)
9113                 set_bit(vport->vport_id, hdev->vport_config_block);
9114
9115         spin_lock_bh(&vport->mac_list_lock);
9116
9117         hclge_build_del_list(list, is_del_list, &tmp_del_list);
9118
9119         spin_unlock_bh(&vport->mac_list_lock);
9120
9121         hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9122
9123         spin_lock_bh(&vport->mac_list_lock);
9124
9125         hclge_sync_from_del_list(&tmp_del_list, list);
9126
9127         spin_unlock_bh(&vport->mac_list_lock);
9128 }
9129
9130 /* remove all mac addresses when uninitializing */
9131 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9132                                         enum HCLGE_MAC_ADDR_TYPE mac_type)
9133 {
9134         struct hclge_mac_node *mac_node, *tmp;
9135         struct hclge_dev *hdev = vport->back;
9136         struct list_head tmp_del_list, *list;
9137
9138         INIT_LIST_HEAD(&tmp_del_list);
9139
9140         list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9141                 &vport->uc_mac_list : &vport->mc_mac_list;
9142
9143         spin_lock_bh(&vport->mac_list_lock);
9144
9145         list_for_each_entry_safe(mac_node, tmp, list, node) {
9146                 switch (mac_node->state) {
9147                 case HCLGE_MAC_TO_DEL:
9148                 case HCLGE_MAC_ACTIVE:
9149                         list_move_tail(&mac_node->node, &tmp_del_list);
9150                         break;
9151                 case HCLGE_MAC_TO_ADD:
9152                         list_del(&mac_node->node);
9153                         kfree(mac_node);
9154                         break;
9155                 }
9156         }
9157
9158         spin_unlock_bh(&vport->mac_list_lock);
9159
9160         if (mac_type == HCLGE_MAC_ADDR_UC)
9161                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9162                                             hclge_rm_uc_addr_common);
9163         else
9164                 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9165                                             hclge_rm_mc_addr_common);
9166
9167         if (!list_empty(&tmp_del_list))
9168                 dev_warn(&hdev->pdev->dev,
9169                          "failed to completely uninit %s mac list for vport %u\n",
9170                          mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9171                          vport->vport_id);
9172
9173         list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9174                 list_del(&mac_node->node);
9175                 kfree(mac_node);
9176         }
9177 }
9178
9179 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9180 {
9181         struct hclge_vport *vport;
9182         int i;
9183
9184         for (i = 0; i < hdev->num_alloc_vport; i++) {
9185                 vport = &hdev->vport[i];
9186                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9187                 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9188         }
9189 }
9190
9191 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9192                                               u16 cmdq_resp, u8 resp_code)
9193 {
9194 #define HCLGE_ETHERTYPE_SUCCESS_ADD             0
9195 #define HCLGE_ETHERTYPE_ALREADY_ADD             1
9196 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW        2
9197 #define HCLGE_ETHERTYPE_KEY_CONFLICT            3
9198
9199         int return_status;
9200
9201         if (cmdq_resp) {
9202                 dev_err(&hdev->pdev->dev,
9203                         "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9204                         cmdq_resp);
9205                 return -EIO;
9206         }
9207
9208         switch (resp_code) {
9209         case HCLGE_ETHERTYPE_SUCCESS_ADD:
9210         case HCLGE_ETHERTYPE_ALREADY_ADD:
9211                 return_status = 0;
9212                 break;
9213         case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9214                 dev_err(&hdev->pdev->dev,
9215                         "add mac ethertype failed for manager table overflow.\n");
9216                 return_status = -EIO;
9217                 break;
9218         case HCLGE_ETHERTYPE_KEY_CONFLICT:
9219                 dev_err(&hdev->pdev->dev,
9220                         "add mac ethertype failed for key conflict.\n");
9221                 return_status = -EIO;
9222                 break;
9223         default:
9224                 dev_err(&hdev->pdev->dev,
9225                         "add mac ethertype failed for undefined, code=%u.\n",
9226                         resp_code);
9227                 return_status = -EIO;
9228         }
9229
9230         return return_status;
9231 }
9232
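/* Return true if @mac_addr is already in use: found in the hardware mac_vlan
 * table, or configured as the mac address of another VF in vf_info.mac.
 */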
9233 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9234                                      u8 *mac_addr)
9235 {
9236         struct hclge_mac_vlan_tbl_entry_cmd req;
9237         struct hclge_dev *hdev = vport->back;
9238         struct hclge_desc desc;
9239         u16 egress_port = 0;
9240         int i;
9241
9242         if (is_zero_ether_addr(mac_addr))
9243                 return false;
9244
9245         memset(&req, 0, sizeof(req));
9246         hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9247                         HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9248         req.egress_port = cpu_to_le16(egress_port);
9249         hclge_prepare_mac_addr(&req, mac_addr, false);
9250
9251         if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9252                 return true;
9253
9254         vf_idx += HCLGE_VF_VPORT_START_NUM;
9255         for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9256                 if (i != vf_idx &&
9257                     ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9258                         return true;
9259
9260         return false;
9261 }
9262
9263 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9264                             u8 *mac_addr)
9265 {
9266         struct hclge_vport *vport = hclge_get_vport(handle);
9267         struct hclge_dev *hdev = vport->back;
9268
9269         vport = hclge_get_vf_vport(hdev, vf);
9270         if (!vport)
9271                 return -EINVAL;
9272
9273         if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9274                 dev_info(&hdev->pdev->dev,
9275                          "Specified MAC(=%pM) is same as before, no change committed!\n",
9276                          mac_addr);
9277                 return 0;
9278         }
9279
9280         if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9281                 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9282                         mac_addr);
9283                 return -EEXIST;
9284         }
9285
9286         ether_addr_copy(vport->vf_info.mac, mac_addr);
9287
9288         if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9289                 dev_info(&hdev->pdev->dev,
9290                          "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9291                          vf, mac_addr);
9292                 return hclge_inform_reset_assert_to_vf(vport);
9293         }
9294
9295         dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9296                  vf, mac_addr);
9297         return 0;
9298 }
9299
9300 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9301                              const struct hclge_mac_mgr_tbl_entry_cmd *req)
9302 {
9303         struct hclge_desc desc;
9304         u8 resp_code;
9305         u16 retval;
9306         int ret;
9307
9308         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9309         memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9310
9311         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9312         if (ret) {
9313                 dev_err(&hdev->pdev->dev,
9314                         "add mac ethertype failed for cmd_send, ret =%d.\n",
9315                         ret);
9316                 return ret;
9317         }
9318
9319         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9320         retval = le16_to_cpu(desc.retval);
9321
9322         return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9323 }
9324
9325 static int init_mgr_tbl(struct hclge_dev *hdev)
9326 {
9327         int ret;
9328         int i;
9329
9330         for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9331                 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9332                 if (ret) {
9333                         dev_err(&hdev->pdev->dev,
9334                                 "add mac ethertype failed, ret =%d.\n",
9335                                 ret);
9336                         return ret;
9337                 }
9338         }
9339
9340         return 0;
9341 }
9342
9343 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9344 {
9345         struct hclge_vport *vport = hclge_get_vport(handle);
9346         struct hclge_dev *hdev = vport->back;
9347
9348         ether_addr_copy(p, hdev->hw.mac.mac_addr);
9349 }
9350
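/* Update the unicast mac list when the device address changes: queue
 * @new_addr for addition (or revive it if it was pending deletion) and, if an
 * @old_addr is given and differs, queue the old address for deletion.
 */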
9351 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9352                                        const u8 *old_addr, const u8 *new_addr)
9353 {
9354         struct list_head *list = &vport->uc_mac_list;
9355         struct hclge_mac_node *old_node, *new_node;
9356
9357         new_node = hclge_find_mac_node(list, new_addr);
9358         if (!new_node) {
9359                 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9360                 if (!new_node)
9361                         return -ENOMEM;
9362
9363                 new_node->state = HCLGE_MAC_TO_ADD;
9364                 ether_addr_copy(new_node->mac_addr, new_addr);
9365                 list_add(&new_node->node, list);
9366         } else {
9367                 if (new_node->state == HCLGE_MAC_TO_DEL)
9368                         new_node->state = HCLGE_MAC_ACTIVE;
9369
9370                 /* make sure the new addr is at the list head, so that the dev
9371                  * addr is not skipped when re-adding into the mac table due to
9372                  * the umv space limitation after a global/imp reset, which
9373                  * clears the mac table in hardware.
9374                  */
9375                 list_move(&new_node->node, list);
9376         }
9377
9378         if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9379                 old_node = hclge_find_mac_node(list, old_addr);
9380                 if (old_node) {
9381                         if (old_node->state == HCLGE_MAC_TO_ADD) {
9382                                 list_del(&old_node->node);
9383                                 kfree(old_node);
9384                         } else {
9385                                 old_node->state = HCLGE_MAC_TO_DEL;
9386                         }
9387                 }
9388         }
9389
9390         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9391
9392         return 0;
9393 }
9394
9395 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9396                               bool is_first)
9397 {
9398         const unsigned char *new_addr = (const unsigned char *)p;
9399         struct hclge_vport *vport = hclge_get_vport(handle);
9400         struct hclge_dev *hdev = vport->back;
9401         unsigned char *old_addr = NULL;
9402         int ret;
9403
9404         /* mac addr check */
9405         if (is_zero_ether_addr(new_addr) ||
9406             is_broadcast_ether_addr(new_addr) ||
9407             is_multicast_ether_addr(new_addr)) {
9408                 dev_err(&hdev->pdev->dev,
9409                         "change uc mac err! invalid mac: %pM.\n",
9410                          new_addr);
9411                 return -EINVAL;
9412         }
9413
9414         ret = hclge_pause_addr_cfg(hdev, new_addr);
9415         if (ret) {
9416                 dev_err(&hdev->pdev->dev,
9417                         "failed to configure mac pause address, ret = %d\n",
9418                         ret);
9419                 return ret;
9420         }
9421
9422         if (!is_first)
9423                 old_addr = hdev->hw.mac.mac_addr;
9424
9425         spin_lock_bh(&vport->mac_list_lock);
9426         ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9427         if (ret) {
9428                 dev_err(&hdev->pdev->dev,
9429                         "failed to change the mac addr:%pM, ret = %d\n",
9430                         new_addr, ret);
9431                 spin_unlock_bh(&vport->mac_list_lock);
9432
9433                 if (!is_first)
9434                         hclge_pause_addr_cfg(hdev, old_addr);
9435
9436                 return ret;
9437         }
9438         /* we must update the dev addr under the spin lock, to prevent it from
9439          * being removed by the set_rx_mode path.
9440          */
9441         ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9442         spin_unlock_bh(&vport->mac_list_lock);
9443
9444         hclge_task_schedule(hdev, 0);
9445
9446         return 0;
9447 }
9448
9449 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9450 {
9451         struct mii_ioctl_data *data = if_mii(ifr);
9452
9453         if (!hnae3_dev_phy_imp_supported(hdev))
9454                 return -EOPNOTSUPP;
9455
9456         switch (cmd) {
9457         case SIOCGMIIPHY:
9458                 data->phy_id = hdev->hw.mac.phy_addr;
9459                 /* this command reads phy id and register at the same time */
9460                 fallthrough;
9461         case SIOCGMIIREG:
9462                 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9463                 return 0;
9464
9465         case SIOCSMIIREG:
9466                 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9467         default:
9468                 return -EOPNOTSUPP;
9469         }
9470 }
9471
9472 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9473                           int cmd)
9474 {
9475         struct hclge_vport *vport = hclge_get_vport(handle);
9476         struct hclge_dev *hdev = vport->back;
9477
9478         switch (cmd) {
9479         case SIOCGHWTSTAMP:
9480                 return hclge_ptp_get_cfg(hdev, ifr);
9481         case SIOCSHWTSTAMP:
9482                 return hclge_ptp_set_cfg(hdev, ifr);
9483         default:
9484                 if (!hdev->hw.mac.phydev)
9485                         return hclge_mii_ioctl(hdev, ifr, cmd);
9486         }
9487
9488         return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9489 }
9490
9491 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9492                                              bool bypass_en)
9493 {
9494         struct hclge_port_vlan_filter_bypass_cmd *req;
9495         struct hclge_desc desc;
9496         int ret;
9497
9498         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9499         req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9500         req->vf_id = vf_id;
9501         hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9502                       bypass_en ? 1 : 0);
9503
9504         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9505         if (ret)
9506                 dev_err(&hdev->pdev->dev,
9507                         "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9508                         vf_id, ret);
9509
9510         return ret;
9511 }
9512
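/* Read-modify-write of the vlan filter control for (@vlan_type, @vf_id):
 * read the current config, then set or clear the @fe_type bits in vlan_fe
 * according to @filter_en and write the result back.
 */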
9513 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9514                                       u8 fe_type, bool filter_en, u8 vf_id)
9515 {
9516         struct hclge_vlan_filter_ctrl_cmd *req;
9517         struct hclge_desc desc;
9518         int ret;
9519
9520         /* read current vlan filter parameter */
9521         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9522         req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9523         req->vlan_type = vlan_type;
9524         req->vf_id = vf_id;
9525
9526         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9527         if (ret) {
9528                 dev_err(&hdev->pdev->dev,
9529                         "failed to get vlan filter config, ret = %d.\n", ret);
9530                 return ret;
9531         }
9532
9533         /* modify and write new config parameter */
9534         hclge_cmd_reuse_desc(&desc, false);
9535         req->vlan_fe = filter_en ?
9536                         (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9537
9538         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9539         if (ret)
9540                 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9541                         ret);
9542
9543         return ret;
9544 }
9545
9546 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9547 {
9548         struct hclge_dev *hdev = vport->back;
9549         struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9550         int ret;
9551
9552         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9553                 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9554                                                   HCLGE_FILTER_FE_EGRESS_V1_B,
9555                                                   enable, vport->vport_id);
9556
9557         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9558                                          HCLGE_FILTER_FE_EGRESS, enable,
9559                                          vport->vport_id);
9560         if (ret)
9561                 return ret;
9562
9563         if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9564                 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9565                                                         !enable);
9566         } else if (!vport->vport_id) {
9567                 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9568                         enable = false;
9569
9570                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9571                                                  HCLGE_FILTER_FE_INGRESS,
9572                                                  enable, 0);
9573         }
9574
9575         return ret;
9576 }
9577
9578 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9579 {
9580         struct hnae3_handle *handle = &vport->nic;
9581         struct hclge_vport_vlan_cfg *vlan, *tmp;
9582         struct hclge_dev *hdev = vport->back;
9583
9584         if (vport->vport_id) {
9585                 if (vport->port_base_vlan_cfg.state !=
9586                         HNAE3_PORT_BASE_VLAN_DISABLE)
9587                         return true;
9588
9589                 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9590                         return false;
9591         } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9592                 return false;
9593         }
9594
9595         if (!vport->req_vlan_fltr_en)
9596                 return false;
9597
9598         /* for compatibility with older devices, always enable the vlan filter */
9599         if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9600                 return true;
9601
9602         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9603                 if (vlan->vlan_id != 0)
9604                         return true;
9605
9606         return false;
9607 }
9608
9609 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9610 {
9611         struct hclge_dev *hdev = vport->back;
9612         bool need_en;
9613         int ret;
9614
9615         mutex_lock(&hdev->vport_lock);
9616
9617         vport->req_vlan_fltr_en = request_en;
9618
9619         need_en = hclge_need_enable_vport_vlan_filter(vport);
9620         if (need_en == vport->cur_vlan_fltr_en) {
9621                 mutex_unlock(&hdev->vport_lock);
9622                 return 0;
9623         }
9624
9625         ret = hclge_set_vport_vlan_filter(vport, need_en);
9626         if (ret) {
9627                 mutex_unlock(&hdev->vport_lock);
9628                 return ret;
9629         }
9630
9631         vport->cur_vlan_fltr_en = need_en;
9632
9633         mutex_unlock(&hdev->vport_lock);
9634
9635         return 0;
9636 }
9637
9638 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9639 {
9640         struct hclge_vport *vport = hclge_get_vport(handle);
9641
9642         return hclge_enable_vport_vlan_filter(vport, enable);
9643 }
9644
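/* Build and send the VF vlan filter command for @vlan. The target function is
 * selected via a one-hot byte in the vf_bitmap: byte vfid / 8, bit vfid % 8.
 * Bitmap bytes beyond HCLGE_MAX_VF_BYTES spill into the second descriptor.
 */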
9645 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9646                                         bool is_kill, u16 vlan,
9647                                         struct hclge_desc *desc)
9648 {
9649         struct hclge_vlan_filter_vf_cfg_cmd *req0;
9650         struct hclge_vlan_filter_vf_cfg_cmd *req1;
9651         u8 vf_byte_val;
9652         u8 vf_byte_off;
9653         int ret;
9654
9655         hclge_cmd_setup_basic_desc(&desc[0],
9656                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9657         hclge_cmd_setup_basic_desc(&desc[1],
9658                                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9659
9660         desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9661
9662         vf_byte_off = vfid / 8;
9663         vf_byte_val = 1 << (vfid % 8);
9664
9665         req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9666         req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9667
9668         req0->vlan_id  = cpu_to_le16(vlan);
9669         req0->vlan_cfg = is_kill;
9670
9671         if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9672                 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9673         else
9674                 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9675
9676         ret = hclge_cmd_send(&hdev->hw, desc, 2);
9677         if (ret) {
9678                 dev_err(&hdev->pdev->dev,
9679                         "Send vf vlan command fail, ret =%d.\n",
9680                         ret);
9681                 return ret;
9682         }
9683
9684         return 0;
9685 }
9686
9687 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9688                                           bool is_kill, struct hclge_desc *desc)
9689 {
9690         struct hclge_vlan_filter_vf_cfg_cmd *req;
9691
9692         req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9693
9694         if (!is_kill) {
9695 #define HCLGE_VF_VLAN_NO_ENTRY  2
9696                 if (!req->resp_code || req->resp_code == 1)
9697                         return 0;
9698
9699                 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9700                         set_bit(vfid, hdev->vf_vlan_full);
9701                         dev_warn(&hdev->pdev->dev,
9702                                  "vf vlan table is full, vf vlan filter is disabled\n");
9703                         return 0;
9704                 }
9705
9706                 dev_err(&hdev->pdev->dev,
9707                         "Add vf vlan filter fail, ret =%u.\n",
9708                         req->resp_code);
9709         } else {
9710 #define HCLGE_VF_VLAN_DEL_NO_FOUND      1
9711                 if (!req->resp_code)
9712                         return 0;
9713
9714                 /* the vf vlan filter is disabled when the vf vlan table is
9715                  * full, so new vlan ids are not added to the vf vlan table.
9716                  * Just return 0 without a warning, to avoid massive verbose
9717                  * logs on unload.
9718                  */
9719                 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9720                         return 0;
9721
9722                 dev_err(&hdev->pdev->dev,
9723                         "Kill vf vlan filter fail, ret =%u.\n",
9724                         req->resp_code);
9725         }
9726
9727         return -EIO;
9728 }
9729
9730 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9731                                     bool is_kill, u16 vlan)
9732 {
9733         struct hclge_vport *vport = &hdev->vport[vfid];
9734         struct hclge_desc desc[2];
9735         int ret;
9736
9737         /* if the vf vlan table is full, firmware closes the vf vlan filter,
9738          * so adding a new vlan id to it is neither possible nor necessary.
9739          * If spoof check is enabled and the vf vlan table is full, a new
9740          * vlan must not be added, since tx packets with it would be dropped.
9741          */
9742         if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9743                 if (vport->vf_info.spoofchk && vlan) {
9744                         dev_err(&hdev->pdev->dev,
9745                                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9746                         return -EPERM;
9747                 }
9748                 return 0;
9749         }
9750
9751         ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9752         if (ret)
9753                 return ret;
9754
9755         return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9756 }
9757
9758 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9759                                       u16 vlan_id, bool is_kill)
9760 {
9761         struct hclge_vlan_filter_pf_cfg_cmd *req;
9762         struct hclge_desc desc;
9763         u8 vlan_offset_byte_val;
9764         u8 vlan_offset_byte;
9765         u8 vlan_offset_160;
9766         int ret;
9767
9768         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9769
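        /* The PF vlan table is addressed in blocks of
         * HCLGE_VLAN_ID_OFFSET_STEP vlan ids: vlan_offset_160 selects the
         * block, vlan_offset_byte the byte inside it, and
         * vlan_offset_byte_val the bit for this vlan id within that byte.
         */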
9770         vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9771         vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9772                            HCLGE_VLAN_BYTE_SIZE;
9773         vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9774
9775         req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9776         req->vlan_offset = vlan_offset_160;
9777         req->vlan_cfg = is_kill;
9778         req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9779
9780         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9781         if (ret)
9782                 dev_err(&hdev->pdev->dev,
9783                         "port vlan command, send fail, ret =%d.\n", ret);
9784         return ret;
9785 }
9786
9787 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9788                                     u16 vport_id, u16 vlan_id,
9789                                     bool is_kill)
9790 {
9791         u16 vport_idx, vport_num = 0;
9792         int ret;
9793
9794         if (is_kill && !vlan_id)
9795                 return 0;
9796
9797         ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9798         if (ret) {
9799                 dev_err(&hdev->pdev->dev,
9800                         "Set %u vport vlan filter config fail, ret =%d.\n",
9801                         vport_id, ret);
9802                 return ret;
9803         }
9804
9805         /* vlan 0 may be added twice when 8021q module is enabled */
9806         if (!is_kill && !vlan_id &&
9807             test_bit(vport_id, hdev->vlan_table[vlan_id]))
9808                 return 0;
9809
9810         if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9811                 dev_err(&hdev->pdev->dev,
9812                         "Add port vlan failed, vport %u is already in vlan %u\n",
9813                         vport_id, vlan_id);
9814                 return -EINVAL;
9815         }
9816
9817         if (is_kill &&
9818             !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9819                 dev_err(&hdev->pdev->dev,
9820                         "Delete port vlan failed, vport %u is not in vlan %u\n",
9821                         vport_id, vlan_id);
9822                 return -EINVAL;
9823         }
9824
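        /* The port (PF) vlan table only needs updating at the edges of
         * membership: program the entry when the first vport joins this
         * vlan and clear it when the last vport leaves.
         */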
9825         for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9826                 vport_num++;
9827
9828         if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9829                 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9830                                                  is_kill);
9831
9832         return ret;
9833 }
9834
9835 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9836 {
9837         struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9838         struct hclge_vport_vtag_tx_cfg_cmd *req;
9839         struct hclge_dev *hdev = vport->back;
9840         struct hclge_desc desc;
9841         u16 bmap_index;
9842         int status;
9843
9844         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9845
9846         req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9847         req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9848         req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9849         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9850                       vcfg->accept_tag1 ? 1 : 0);
9851         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9852                       vcfg->accept_untag1 ? 1 : 0);
9853         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9854                       vcfg->accept_tag2 ? 1 : 0);
9855         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9856                       vcfg->accept_untag2 ? 1 : 0);
9857         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9858                       vcfg->insert_tag1_en ? 1 : 0);
9859         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9860                       vcfg->insert_tag2_en ? 1 : 0);
9861         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9862                       vcfg->tag_shift_mode_en ? 1 : 0);
9863         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9864
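        /* Select which vport this tx vtag config applies to: vf_offset
         * selects the group of HCLGE_VF_NUM_PER_CMD vports, and the bit set
         * in vf_bitmap[bmap_index] identifies this vport within the group.
         * The rx vtag config below uses the same addressing.
         */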
9865         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9866         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9867                         HCLGE_VF_NUM_PER_BYTE;
9868         req->vf_bitmap[bmap_index] =
9869                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9870
9871         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9872         if (status)
9873                 dev_err(&hdev->pdev->dev,
9874                         "Send port txvlan cfg command fail, ret =%d\n",
9875                         status);
9876
9877         return status;
9878 }
9879
9880 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9881 {
9882         struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9883         struct hclge_vport_vtag_rx_cfg_cmd *req;
9884         struct hclge_dev *hdev = vport->back;
9885         struct hclge_desc desc;
9886         u16 bmap_index;
9887         int status;
9888
9889         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9890
9891         req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9892         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9893                       vcfg->strip_tag1_en ? 1 : 0);
9894         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9895                       vcfg->strip_tag2_en ? 1 : 0);
9896         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9897                       vcfg->vlan1_vlan_prionly ? 1 : 0);
9898         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9899                       vcfg->vlan2_vlan_prionly ? 1 : 0);
9900         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9901                       vcfg->strip_tag1_discard_en ? 1 : 0);
9902         hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9903                       vcfg->strip_tag2_discard_en ? 1 : 0);
9904
9905         req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9906         bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9907                         HCLGE_VF_NUM_PER_BYTE;
9908         req->vf_bitmap[bmap_index] =
9909                 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9910
9911         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9912         if (status)
9913                 dev_err(&hdev->pdev->dev,
9914                         "Send port rxvlan cfg command fail, ret =%d\n",
9915                         status);
9916
9917         return status;
9918 }
9919
9920 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9921                                   u16 port_base_vlan_state,
9922                                   u16 vlan_tag, u8 qos)
9923 {
9924         int ret;
9925
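        /* With port base vlan disabled, no default tag1 is inserted on tx;
         * with it enabled, hw inserts a tag1 built as
         * (qos << VLAN_PRIO_SHIFT) | vlan_tag on transmit.
         */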
9926         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9927                 vport->txvlan_cfg.accept_tag1 = true;
9928                 vport->txvlan_cfg.insert_tag1_en = false;
9929                 vport->txvlan_cfg.default_tag1 = 0;
9930         } else {
9931                 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9932
9933                 vport->txvlan_cfg.accept_tag1 =
9934                         ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9935                 vport->txvlan_cfg.insert_tag1_en = true;
9936                 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9937                                                  vlan_tag;
9938         }
9939
9940         vport->txvlan_cfg.accept_untag1 = true;
9941
9942         /* accept_tag2 and accept_untag2 are not supported on
9943          * pdev revision(0x20); newer revisions support them. These
9944          * two fields cannot be configured by the user.
9945          */
9946         vport->txvlan_cfg.accept_tag2 = true;
9947         vport->txvlan_cfg.accept_untag2 = true;
9948         vport->txvlan_cfg.insert_tag2_en = false;
9949         vport->txvlan_cfg.default_tag2 = 0;
9950         vport->txvlan_cfg.tag_shift_mode_en = true;
9951
9952         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9953                 vport->rxvlan_cfg.strip_tag1_en = false;
9954                 vport->rxvlan_cfg.strip_tag2_en =
9955                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9956                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9957         } else {
9958                 vport->rxvlan_cfg.strip_tag1_en =
9959                                 vport->rxvlan_cfg.rx_vlan_offload_en;
9960                 vport->rxvlan_cfg.strip_tag2_en = true;
9961                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9962         }
9963
9964         vport->rxvlan_cfg.strip_tag1_discard_en = false;
9965         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9966         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9967
9968         ret = hclge_set_vlan_tx_offload_cfg(vport);
9969         if (ret)
9970                 return ret;
9971
9972         return hclge_set_vlan_rx_offload_cfg(vport);
9973 }
9974
9975 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9976 {
9977         struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9978         struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9979         struct hclge_desc desc;
9980         int status;
9981
9982         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9983         rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9984         rx_req->ot_fst_vlan_type =
9985                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9986         rx_req->ot_sec_vlan_type =
9987                 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9988         rx_req->in_fst_vlan_type =
9989                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9990         rx_req->in_sec_vlan_type =
9991                 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9992
9993         status = hclge_cmd_send(&hdev->hw, &desc, 1);
9994         if (status) {
9995                 dev_err(&hdev->pdev->dev,
9996                         "Send rxvlan protocol type command fail, ret =%d\n",
9997                         status);
9998                 return status;
9999         }
10000
10001         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10002
10003         tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10004         tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10005         tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10006
10007         status = hclge_cmd_send(&hdev->hw, &desc, 1);
10008         if (status)
10009                 dev_err(&hdev->pdev->dev,
10010                         "Send txvlan protocol type command fail, ret =%d\n",
10011                         status);
10012
10013         return status;
10014 }
10015
10016 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10017 {
10018 #define HCLGE_DEF_VLAN_TYPE             0x8100
10019
10020         struct hnae3_handle *handle = &hdev->vport[0].nic;
10021         struct hclge_vport *vport;
10022         int ret;
10023         int i;
10024
10025         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10026                 /* for revision 0x21, vf vlan filter is per function */
10027                 for (i = 0; i < hdev->num_alloc_vport; i++) {
10028                         vport = &hdev->vport[i];
10029                         ret = hclge_set_vlan_filter_ctrl(hdev,
10030                                                          HCLGE_FILTER_TYPE_VF,
10031                                                          HCLGE_FILTER_FE_EGRESS,
10032                                                          true,
10033                                                          vport->vport_id);
10034                         if (ret)
10035                                 return ret;
10036                         vport->cur_vlan_fltr_en = true;
10037                 }
10038
10039                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10040                                                  HCLGE_FILTER_FE_INGRESS, true,
10041                                                  0);
10042                 if (ret)
10043                         return ret;
10044         } else {
10045                 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10046                                                  HCLGE_FILTER_FE_EGRESS_V1_B,
10047                                                  true, 0);
10048                 if (ret)
10049                         return ret;
10050         }
10051
10052         hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10053         hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10054         hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10055         hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10056         hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10057         hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10058
10059         ret = hclge_set_vlan_protocol_type(hdev);
10060         if (ret)
10061                 return ret;
10062
10063         for (i = 0; i < hdev->num_alloc_vport; i++) {
10064                 u16 vlan_tag;
10065                 u8 qos;
10066
10067                 vport = &hdev->vport[i];
10068                 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10069                 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10070
10071                 ret = hclge_vlan_offload_cfg(vport,
10072                                              vport->port_base_vlan_cfg.state,
10073                                              vlan_tag, qos);
10074                 if (ret)
10075                         return ret;
10076         }
10077
10078         return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10079 }
10080
10081 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10082                                        bool writen_to_tbl)
10083 {
10084         struct hclge_vport_vlan_cfg *vlan, *tmp;
10085
10086         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10087                 if (vlan->vlan_id == vlan_id)
10088                         return;
10089
10090         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10091         if (!vlan)
10092                 return;
10093
10094         vlan->hd_tbl_status = writen_to_tbl;
10095         vlan->vlan_id = vlan_id;
10096
10097         list_add_tail(&vlan->node, &vport->vlan_list);
10098 }
10099
10100 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10101 {
10102         struct hclge_vport_vlan_cfg *vlan, *tmp;
10103         struct hclge_dev *hdev = vport->back;
10104         int ret;
10105
10106         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10107                 if (!vlan->hd_tbl_status) {
10108                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10109                                                        vport->vport_id,
10110                                                        vlan->vlan_id, false);
10111                         if (ret) {
10112                                 dev_err(&hdev->pdev->dev,
10113                                         "restore vport vlan list failed, ret=%d\n",
10114                                         ret);
10115                                 return ret;
10116                         }
10117                 }
10118                 vlan->hd_tbl_status = true;
10119         }
10120
10121         return 0;
10122 }
10123
10124 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10125                                       bool is_write_tbl)
10126 {
10127         struct hclge_vport_vlan_cfg *vlan, *tmp;
10128         struct hclge_dev *hdev = vport->back;
10129
10130         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10131                 if (vlan->vlan_id == vlan_id) {
10132                         if (is_write_tbl && vlan->hd_tbl_status)
10133                                 hclge_set_vlan_filter_hw(hdev,
10134                                                          htons(ETH_P_8021Q),
10135                                                          vport->vport_id,
10136                                                          vlan_id,
10137                                                          true);
10138
10139                         list_del(&vlan->node);
10140                         kfree(vlan);
10141                         break;
10142                 }
10143         }
10144 }
10145
10146 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10147 {
10148         struct hclge_vport_vlan_cfg *vlan, *tmp;
10149         struct hclge_dev *hdev = vport->back;
10150
10151         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10152                 if (vlan->hd_tbl_status)
10153                         hclge_set_vlan_filter_hw(hdev,
10154                                                  htons(ETH_P_8021Q),
10155                                                  vport->vport_id,
10156                                                  vlan->vlan_id,
10157                                                  true);
10158
10159                 vlan->hd_tbl_status = false;
10160                 if (is_del_list) {
10161                         list_del(&vlan->node);
10162                         kfree(vlan);
10163                 }
10164         }
10165         clear_bit(vport->vport_id, hdev->vf_vlan_full);
10166 }
10167
10168 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10169 {
10170         struct hclge_vport_vlan_cfg *vlan, *tmp;
10171         struct hclge_vport *vport;
10172         int i;
10173
10174         for (i = 0; i < hdev->num_alloc_vport; i++) {
10175                 vport = &hdev->vport[i];
10176                 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10177                         list_del(&vlan->node);
10178                         kfree(vlan);
10179                 }
10180         }
10181 }
10182
10183 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10184 {
10185         struct hclge_vport_vlan_cfg *vlan, *tmp;
10186         struct hclge_dev *hdev = vport->back;
10187         u16 vlan_proto;
10188         u16 vlan_id;
10189         u16 state;
10190         int ret;
10191
10192         vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10193         vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10194         state = vport->port_base_vlan_cfg.state;
10195
10196         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10197                 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10198                 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10199                                          vport->vport_id, vlan_id,
10200                                          false);
10201                 return;
10202         }
10203
10204         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10205                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10206                                                vport->vport_id,
10207                                                vlan->vlan_id, false);
10208                 if (ret)
10209                         break;
10210                 vlan->hd_tbl_status = true;
10211         }
10212 }
10213
10214 /* For global reset and imp reset, hardware will clear the mac table,
10215  * so we change the mac address state from ACTIVE to TO_ADD, then they
10216  * can be restored in the service task after the reset completes.
10217  * Furthermore, mac addresses with state TO_DEL or DEL_FAIL do not need
10218  * to be restored after reset, so just remove these mac nodes from mac_list.
10219  */
10220 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10221 {
10222         struct hclge_mac_node *mac_node, *tmp;
10223
10224         list_for_each_entry_safe(mac_node, tmp, list, node) {
10225                 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10226                         mac_node->state = HCLGE_MAC_TO_ADD;
10227                 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10228                         list_del(&mac_node->node);
10229                         kfree(mac_node);
10230                 }
10231         }
10232 }
10233
10234 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10235 {
10236         spin_lock_bh(&vport->mac_list_lock);
10237
10238         hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10239         hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10240         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10241
10242         spin_unlock_bh(&vport->mac_list_lock);
10243 }
10244
10245 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10246 {
10247         struct hclge_vport *vport = &hdev->vport[0];
10248         struct hnae3_handle *handle = &vport->nic;
10249
10250         hclge_restore_mac_table_common(vport);
10251         hclge_restore_vport_vlan_table(vport);
10252         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10253         hclge_restore_fd_entries(handle);
10254 }
10255
10256 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10257 {
10258         struct hclge_vport *vport = hclge_get_vport(handle);
10259
10260         if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10261                 vport->rxvlan_cfg.strip_tag1_en = false;
10262                 vport->rxvlan_cfg.strip_tag2_en = enable;
10263                 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10264         } else {
10265                 vport->rxvlan_cfg.strip_tag1_en = enable;
10266                 vport->rxvlan_cfg.strip_tag2_en = true;
10267                 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10268         }
10269
10270         vport->rxvlan_cfg.strip_tag1_discard_en = false;
10271         vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10272         vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10273         vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10274
10275         return hclge_set_vlan_rx_offload_cfg(vport);
10276 }
10277
10278 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10279 {
10280         struct hclge_dev *hdev = vport->back;
10281
10282         if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10283                 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10284 }
10285
10286 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10287                                             u16 port_base_vlan_state,
10288                                             struct hclge_vlan_info *new_info,
10289                                             struct hclge_vlan_info *old_info)
10290 {
10291         struct hclge_dev *hdev = vport->back;
10292         int ret;
10293
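        /* Enabling port base vlan removes the per-vport vlan list entries
         * from hw and clears vlan 0, then programs the new port base vlan
         * entry; disabling it re-adds vlan 0, removes the old port base vlan
         * entry and restores the whole vport vlan list to hw.
         */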
10294         if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10295                 hclge_rm_vport_all_vlan_table(vport, false);
10296                 /* force clear VLAN 0 */
10297                 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10298                 if (ret)
10299                         return ret;
10300                 return hclge_set_vlan_filter_hw(hdev,
10301                                                  htons(new_info->vlan_proto),
10302                                                  vport->vport_id,
10303                                                  new_info->vlan_tag,
10304                                                  false);
10305         }
10306
10307         /* force add VLAN 0 */
10308         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10309         if (ret)
10310                 return ret;
10311
10312         ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10313                                        vport->vport_id, old_info->vlan_tag,
10314                                        true);
10315         if (ret)
10316                 return ret;
10317
10318         return hclge_add_vport_all_vlan_table(vport);
10319 }
10320
10321 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10322                                           const struct hclge_vlan_info *old_cfg)
10323 {
10324         if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10325                 return true;
10326
10327         if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10328                 return true;
10329
10330         return false;
10331 }
10332
10333 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10334                                     struct hclge_vlan_info *vlan_info)
10335 {
10336         struct hnae3_handle *nic = &vport->nic;
10337         struct hclge_vlan_info *old_vlan_info;
10338         struct hclge_dev *hdev = vport->back;
10339         int ret;
10340
10341         old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10342
10343         ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10344                                      vlan_info->qos);
10345         if (ret)
10346                 return ret;
10347
10348         if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10349                 goto out;
10350
10351         if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10352                 /* add new VLAN tag */
10353                 ret = hclge_set_vlan_filter_hw(hdev,
10354                                                htons(vlan_info->vlan_proto),
10355                                                vport->vport_id,
10356                                                vlan_info->vlan_tag,
10357                                                false);
10358                 if (ret)
10359                         return ret;
10360
10361                 /* remove old VLAN tag */
10362                 if (old_vlan_info->vlan_tag == 0)
10363                         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10364                                                        true, 0);
10365                 else
10366                         ret = hclge_set_vlan_filter_hw(hdev,
10367                                                        htons(ETH_P_8021Q),
10368                                                        vport->vport_id,
10369                                                        old_vlan_info->vlan_tag,
10370                                                        true);
10371                 if (ret) {
10372                         dev_err(&hdev->pdev->dev,
10373                                 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10374                                 vport->vport_id, old_vlan_info->vlan_tag, ret);
10375                         return ret;
10376                 }
10377
10378                 goto out;
10379         }
10380
10381         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10382                                                old_vlan_info);
10383         if (ret)
10384                 return ret;
10385
10386 out:
10387         vport->port_base_vlan_cfg.state = state;
10388         if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10389                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10390         else
10391                 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10392
10393         vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10394         hclge_set_vport_vlan_fltr_change(vport);
10395
10396         return 0;
10397 }
10398
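/* Work out how the port base vlan state should change for the requested
 * (vlan, qos) pair: DISABLE -> ENABLE when a non-zero tag or qos is set,
 * ENABLE -> DISABLE when both are cleared, MODIFY when an already enabled
 * tag/qos changes, and NOCHANGE otherwise.
 */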
10399 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10400                                           enum hnae3_port_base_vlan_state state,
10401                                           u16 vlan, u8 qos)
10402 {
10403         if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10404                 if (!vlan && !qos)
10405                         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10406
10407                 return HNAE3_PORT_BASE_VLAN_ENABLE;
10408         }
10409
10410         if (!vlan && !qos)
10411                 return HNAE3_PORT_BASE_VLAN_DISABLE;
10412
10413         if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10414             vport->port_base_vlan_cfg.vlan_info.qos == qos)
10415                 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10416
10417         return HNAE3_PORT_BASE_VLAN_MODIFY;
10418 }
10419
10420 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10421                                     u16 vlan, u8 qos, __be16 proto)
10422 {
10423         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10424         struct hclge_vport *vport = hclge_get_vport(handle);
10425         struct hclge_dev *hdev = vport->back;
10426         struct hclge_vlan_info vlan_info;
10427         u16 state;
10428         int ret;
10429
10430         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10431                 return -EOPNOTSUPP;
10432
10433         vport = hclge_get_vf_vport(hdev, vfid);
10434         if (!vport)
10435                 return -EINVAL;
10436
10437         /* qos is a 3-bit value, so it cannot be bigger than 7 */
10438         if (vlan > VLAN_N_VID - 1 || qos > 7)
10439                 return -EINVAL;
10440         if (proto != htons(ETH_P_8021Q))
10441                 return -EPROTONOSUPPORT;
10442
10443         state = hclge_get_port_base_vlan_state(vport,
10444                                                vport->port_base_vlan_cfg.state,
10445                                                vlan, qos);
10446         if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10447                 return 0;
10448
10449         vlan_info.vlan_tag = vlan;
10450         vlan_info.qos = qos;
10451         vlan_info.vlan_proto = ntohs(proto);
10452
10453         ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10454         if (ret) {
10455                 dev_err(&hdev->pdev->dev,
10456                         "failed to update port base vlan for vf %d, ret = %d\n",
10457                         vfid, ret);
10458                 return ret;
10459         }
10460
10461         /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10462          * VLAN state.
10463          */
10464         if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10465             test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10466                 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10467                                                   vport->vport_id, state,
10468                                                   &vlan_info);
10469
10470         return 0;
10471 }
10472
10473 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10474 {
10475         struct hclge_vlan_info *vlan_info;
10476         struct hclge_vport *vport;
10477         int ret;
10478         int vf;
10479
10480         /* clear port base vlan for all vfs */
10481         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10482                 vport = &hdev->vport[vf];
10483                 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10484
10485                 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10486                                                vport->vport_id,
10487                                                vlan_info->vlan_tag, true);
10488                 if (ret)
10489                         dev_err(&hdev->pdev->dev,
10490                                 "failed to clear vf vlan for vf%d, ret = %d\n",
10491                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10492         }
10493 }
10494
10495 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10496                           u16 vlan_id, bool is_kill)
10497 {
10498         struct hclge_vport *vport = hclge_get_vport(handle);
10499         struct hclge_dev *hdev = vport->back;
10500         bool writen_to_tbl = false;
10501         int ret = 0;
10502
10503         /* When the device is resetting or reset has failed, firmware is
10504          * unable to handle the mailbox. Just record the vlan id, and
10505          * remove it after the reset finishes.
10506          */
10507         if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10508              test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10509                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10510                 return -EBUSY;
10511         }
10512
10513         /* when port base vlan is enabled, we use the port base vlan as the
10514          * vlan filter entry. In this case, we don't update the vlan filter
10515          * table when the user adds or removes a vlan, we just update the
10516          * vport vlan list. The vlan ids in the vlan list are not written to
10517          * the vlan filter table until port base vlan is disabled.
10518          */
10519         if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10520                 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10521                                                vlan_id, is_kill);
10522                 writen_to_tbl = true;
10523         }
10524
10525         if (!ret) {
10526                 if (is_kill)
10527                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10528                 else
10529                         hclge_add_vport_vlan_table(vport, vlan_id,
10530                                                    writen_to_tbl);
10531         } else if (is_kill) {
10532                 /* when removing the hw vlan filter fails, record the vlan
10533                  * id, and try to remove it from hw later, to stay consistent
10534                  * with the stack
10535                  */
10536                 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10537         }
10538
10539         hclge_set_vport_vlan_fltr_change(vport);
10540
10541         return ret;
10542 }
10543
10544 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10545 {
10546         struct hclge_vport *vport;
10547         int ret;
10548         u16 i;
10549
10550         for (i = 0; i < hdev->num_alloc_vport; i++) {
10551                 vport = &hdev->vport[i];
10552                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10553                                         &vport->state))
10554                         continue;
10555
10556                 ret = hclge_enable_vport_vlan_filter(vport,
10557                                                      vport->req_vlan_fltr_en);
10558                 if (ret) {
10559                         dev_err(&hdev->pdev->dev,
10560                                 "failed to sync vlan filter state for vport%u, ret = %d\n",
10561                                 vport->vport_id, ret);
10562                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10563                                 &vport->state);
10564                         return;
10565                 }
10566         }
10567 }
10568
10569 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10570 {
10571 #define HCLGE_MAX_SYNC_COUNT    60
10572
10573         int i, ret, sync_cnt = 0;
10574         u16 vlan_id;
10575
10576         /* start from vport 1 because the PF is always alive */
10577         for (i = 0; i < hdev->num_alloc_vport; i++) {
10578                 struct hclge_vport *vport = &hdev->vport[i];
10579
10580                 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10581                                          VLAN_N_VID);
10582                 while (vlan_id != VLAN_N_VID) {
10583                         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10584                                                        vport->vport_id, vlan_id,
10585                                                        true);
10586                         if (ret && ret != -EINVAL)
10587                                 return;
10588
10589                         clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10590                         hclge_rm_vport_vlan_table(vport, vlan_id, false);
10591                         hclge_set_vport_vlan_fltr_change(vport);
10592
10593                         sync_cnt++;
10594                         if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10595                                 return;
10596
10597                         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10598                                                  VLAN_N_VID);
10599                 }
10600         }
10601
10602         hclge_sync_vlan_fltr_state(hdev);
10603 }
10604
10605 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10606 {
10607         struct hclge_config_max_frm_size_cmd *req;
10608         struct hclge_desc desc;
10609
10610         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10611
10612         req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10613         req->max_frm_size = cpu_to_le16(new_mps);
10614         req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10615
10616         return hclge_cmd_send(&hdev->hw, &desc, 1);
10617 }
10618
10619 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10620 {
10621         struct hclge_vport *vport = hclge_get_vport(handle);
10622
10623         return hclge_set_vport_mtu(vport, new_mtu);
10624 }
10625
10626 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10627 {
10628         struct hclge_dev *hdev = vport->back;
10629         int i, max_frm_size, ret;
10630
10631         /* HW supports 2 layers of vlan tags */
10632         max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
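        /* e.g. a 1500 byte MTU needs 1500 + 14 + 4 + 2 * 4 = 1526 bytes */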
10633         if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10634             max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10635                 return -EINVAL;
10636
10637         max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10638         mutex_lock(&hdev->vport_lock);
10639         /* VF's mps must fit within hdev->mps */
10640         if (vport->vport_id && max_frm_size > hdev->mps) {
10641                 mutex_unlock(&hdev->vport_lock);
10642                 return -EINVAL;
10643         } else if (vport->vport_id) {
10644                 vport->mps = max_frm_size;
10645                 mutex_unlock(&hdev->vport_lock);
10646                 return 0;
10647         }
10648
10649         /* PF's mps must be greater than VF's mps */
10650         for (i = 1; i < hdev->num_alloc_vport; i++)
10651                 if (max_frm_size < hdev->vport[i].mps) {
10652                         mutex_unlock(&hdev->vport_lock);
10653                         return -EINVAL;
10654                 }
10655
10656         hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10657
10658         ret = hclge_set_mac_mtu(hdev, max_frm_size);
10659         if (ret) {
10660                 dev_err(&hdev->pdev->dev,
10661                         "Change mtu fail, ret =%d\n", ret);
10662                 goto out;
10663         }
10664
10665         hdev->mps = max_frm_size;
10666         vport->mps = max_frm_size;
10667
10668         ret = hclge_buffer_alloc(hdev);
10669         if (ret)
10670                 dev_err(&hdev->pdev->dev,
10671                         "Allocate buffer fail, ret =%d\n", ret);
10672
10673 out:
10674         hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10675         mutex_unlock(&hdev->vport_lock);
10676         return ret;
10677 }
10678
10679 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10680                                     bool enable)
10681 {
10682         struct hclge_reset_tqp_queue_cmd *req;
10683         struct hclge_desc desc;
10684         int ret;
10685
10686         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10687
10688         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10689         req->tqp_id = cpu_to_le16(queue_id);
10690         if (enable)
10691                 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10692
10693         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10694         if (ret) {
10695                 dev_err(&hdev->pdev->dev,
10696                         "Send tqp reset cmd error, status =%d\n", ret);
10697                 return ret;
10698         }
10699
10700         return 0;
10701 }
10702
10703 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10704 {
10705         struct hclge_reset_tqp_queue_cmd *req;
10706         struct hclge_desc desc;
10707         int ret;
10708
10709         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10710
10711         req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10712         req->tqp_id = cpu_to_le16(queue_id);
10713
10714         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10715         if (ret) {
10716                 dev_err(&hdev->pdev->dev,
10717                         "Get reset status error, status =%d\n", ret);
10718                 return ret;
10719         }
10720
10721         return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10722 }
10723
10724 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10725 {
10726         struct hnae3_queue *queue;
10727         struct hclge_tqp *tqp;
10728
10729         queue = handle->kinfo.tqp[queue_id];
10730         tqp = container_of(queue, struct hclge_tqp, q);
10731
10732         return tqp->index;
10733 }
10734
10735 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10736 {
10737         struct hclge_vport *vport = hclge_get_vport(handle);
10738         struct hclge_dev *hdev = vport->back;
10739         u16 reset_try_times = 0;
10740         int reset_status;
10741         u16 queue_gid;
10742         int ret;
10743         u16 i;
10744
10745         for (i = 0; i < handle->kinfo.num_tqps; i++) {
10746                 queue_gid = hclge_covert_handle_qid_global(handle, i);
10747                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10748                 if (ret) {
10749                         dev_err(&hdev->pdev->dev,
10750                                 "failed to send reset tqp cmd, ret = %d\n",
10751                                 ret);
10752                         return ret;
10753                 }
10754
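                /* Poll the tqp ready bit until hw reports the reset done,
                 * sleeping about 1ms per try, and give up after
                 * HCLGE_TQP_RESET_TRY_TIMES attempts.
                 */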
10755                 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10756                         reset_status = hclge_get_reset_status(hdev, queue_gid);
10757                         if (reset_status)
10758                                 break;
10759
10760                         /* Wait for tqp hw reset */
10761                         usleep_range(1000, 1200);
10762                 }
10763
10764                 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10765                         dev_err(&hdev->pdev->dev,
10766                                 "wait for tqp hw reset timeout\n");
10767                         return -ETIME;
10768                 }
10769
10770                 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10771                 if (ret) {
10772                         dev_err(&hdev->pdev->dev,
10773                                 "failed to deassert soft reset, ret = %d\n",
10774                                 ret);
10775                         return ret;
10776                 }
10777                 reset_try_times = 0;
10778         }
10779         return 0;
10780 }
10781
10782 static int hclge_reset_rcb(struct hnae3_handle *handle)
10783 {
10784 #define HCLGE_RESET_RCB_NOT_SUPPORT     0U
10785 #define HCLGE_RESET_RCB_SUCCESS         1U
10786
10787         struct hclge_vport *vport = hclge_get_vport(handle);
10788         struct hclge_dev *hdev = vport->back;
10789         struct hclge_reset_cmd *req;
10790         struct hclge_desc desc;
10791         u8 return_status;
10792         u16 queue_gid;
10793         int ret;
10794
10795         queue_gid = hclge_covert_handle_qid_global(handle, 0);
10796
10797         req = (struct hclge_reset_cmd *)desc.data;
10798         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10799         hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10800         req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10801         req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10802
10803         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10804         if (ret) {
10805                 dev_err(&hdev->pdev->dev,
10806                         "failed to send rcb reset cmd, ret = %d\n", ret);
10807                 return ret;
10808         }
10809
10810         return_status = req->fun_reset_rcb_return_status;
10811         if (return_status == HCLGE_RESET_RCB_SUCCESS)
10812                 return 0;
10813
10814         if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10815                 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10816                         return_status);
10817                 return -EIO;
10818         }
10819
10820         /* if the reset rcb cmd is unsupported, fall back to the reset tqp
10821          * cmd to reset all tqps one by one
10822          */
10823         return hclge_reset_tqp_cmd(handle);
10824 }
10825
10826 int hclge_reset_tqp(struct hnae3_handle *handle)
10827 {
10828         struct hclge_vport *vport = hclge_get_vport(handle);
10829         struct hclge_dev *hdev = vport->back;
10830         int ret;
10831
10832         /* only need to disable PF's tqp */
10833         if (!vport->vport_id) {
10834                 ret = hclge_tqp_enable(handle, false);
10835                 if (ret) {
10836                         dev_err(&hdev->pdev->dev,
10837                                 "failed to disable tqp, ret = %d\n", ret);
10838                         return ret;
10839                 }
10840         }
10841
10842         return hclge_reset_rcb(handle);
10843 }
10844
10845 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10846 {
10847         struct hclge_vport *vport = hclge_get_vport(handle);
10848         struct hclge_dev *hdev = vport->back;
10849
10850         return hdev->fw_version;
10851 }
10852
10853 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10854 {
10855         struct phy_device *phydev = hdev->hw.mac.phydev;
10856
10857         if (!phydev)
10858                 return;
10859
10860         phy_set_asym_pause(phydev, rx_en, tx_en);
10861 }
10862
10863 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10864 {
10865         int ret;
10866
10867         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10868                 return 0;
10869
10870         ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10871         if (ret)
10872                 dev_err(&hdev->pdev->dev,
10873                         "configure pauseparam error, ret = %d.\n", ret);
10874
10875         return ret;
10876 }
10877
10878 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10879 {
10880         struct phy_device *phydev = hdev->hw.mac.phydev;
10881         u16 remote_advertising = 0;
10882         u16 local_advertising;
10883         u32 rx_pause, tx_pause;
10884         u8 flowctl;
10885
10886         if (!phydev->link || !phydev->autoneg)
10887                 return 0;
10888
10889         local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10890
10891         if (phydev->pause)
10892                 remote_advertising = LPA_PAUSE_CAP;
10893
10894         if (phydev->asym_pause)
10895                 remote_advertising |= LPA_PAUSE_ASYM;
10896
10897         flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10898                                            remote_advertising);
10899         tx_pause = flowctl & FLOW_CTRL_TX;
10900         rx_pause = flowctl & FLOW_CTRL_RX;
10901
10902         if (phydev->duplex == HCLGE_MAC_HALF) {
10903                 tx_pause = 0;
10904                 rx_pause = 0;
10905         }
10906
10907         return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10908 }
10909
10910 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10911                                  u32 *rx_en, u32 *tx_en)
10912 {
10913         struct hclge_vport *vport = hclge_get_vport(handle);
10914         struct hclge_dev *hdev = vport->back;
10915         u8 media_type = hdev->hw.mac.media_type;
10916
10917         *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10918                     hclge_get_autoneg(handle) : 0;
10919
10920         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10921                 *rx_en = 0;
10922                 *tx_en = 0;
10923                 return;
10924         }
10925
10926         if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10927                 *rx_en = 1;
10928                 *tx_en = 0;
10929         } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10930                 *tx_en = 1;
10931                 *rx_en = 0;
10932         } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10933                 *rx_en = 1;
10934                 *tx_en = 1;
10935         } else {
10936                 *rx_en = 0;
10937                 *tx_en = 0;
10938         }
10939 }
10940
10941 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10942                                          u32 rx_en, u32 tx_en)
10943 {
10944         if (rx_en && tx_en)
10945                 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10946         else if (rx_en && !tx_en)
10947                 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10948         else if (!rx_en && tx_en)
10949                 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10950         else
10951                 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10952
10953         hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10954 }
10955
10956 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10957                                 u32 rx_en, u32 tx_en)
10958 {
10959         struct hclge_vport *vport = hclge_get_vport(handle);
10960         struct hclge_dev *hdev = vport->back;
10961         struct phy_device *phydev = hdev->hw.mac.phydev;
10962         u32 fc_autoneg;
10963
10964         if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10965                 fc_autoneg = hclge_get_autoneg(handle);
10966                 if (auto_neg != fc_autoneg) {
10967                         dev_info(&hdev->pdev->dev,
10968                                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10969                         return -EOPNOTSUPP;
10970                 }
10971         }
10972
10973         if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10974                 dev_info(&hdev->pdev->dev,
10975                          "Priority flow control enabled. Cannot set link flow control.\n");
10976                 return -EOPNOTSUPP;
10977         }
10978
10979         hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10980
10981         hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10982
10983         if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10984                 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10985
10986         if (phydev)
10987                 return phy_start_aneg(phydev);
10988
10989         return -EOPNOTSUPP;
10990 }
10991
10992 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10993                                           u8 *auto_neg, u32 *speed, u8 *duplex)
10994 {
10995         struct hclge_vport *vport = hclge_get_vport(handle);
10996         struct hclge_dev *hdev = vport->back;
10997
10998         if (speed)
10999                 *speed = hdev->hw.mac.speed;
11000         if (duplex)
11001                 *duplex = hdev->hw.mac.duplex;
11002         if (auto_neg)
11003                 *auto_neg = hdev->hw.mac.autoneg;
11004 }
11005
11006 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11007                                  u8 *module_type)
11008 {
11009         struct hclge_vport *vport = hclge_get_vport(handle);
11010         struct hclge_dev *hdev = vport->back;
11011
11012         /* When the nic is down, the service task is not running and does not
11013          * update the port information every second. Query the port information
11014          * before returning the media type to ensure it is correct.
11015          */
11016         hclge_update_port_info(hdev);
11017
11018         if (media_type)
11019                 *media_type = hdev->hw.mac.media_type;
11020
11021         if (module_type)
11022                 *module_type = hdev->hw.mac.module_type;
11023 }
11024
11025 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11026                                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11027 {
11028         struct hclge_vport *vport = hclge_get_vport(handle);
11029         struct hclge_dev *hdev = vport->back;
11030         struct phy_device *phydev = hdev->hw.mac.phydev;
11031         int mdix_ctrl, mdix, is_resolved;
11032         unsigned int retval;
11033
11034         if (!phydev) {
11035                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11036                 *tp_mdix = ETH_TP_MDI_INVALID;
11037                 return;
11038         }
11039
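        /* Switch the PHY register page to the MDIX page, read the control
         * and status registers to resolve the MDI-X mode, then switch back
         * to the copper page.
         */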
11040         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11041
11042         retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11043         mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11044                                     HCLGE_PHY_MDIX_CTRL_S);
11045
11046         retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11047         mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11048         is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11049
11050         phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11051
11052         switch (mdix_ctrl) {
11053         case 0x0:
11054                 *tp_mdix_ctrl = ETH_TP_MDI;
11055                 break;
11056         case 0x1:
11057                 *tp_mdix_ctrl = ETH_TP_MDI_X;
11058                 break;
11059         case 0x3:
11060                 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11061                 break;
11062         default:
11063                 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11064                 break;
11065         }
11066
11067         if (!is_resolved)
11068                 *tp_mdix = ETH_TP_MDI_INVALID;
11069         else if (mdix)
11070                 *tp_mdix = ETH_TP_MDI_X;
11071         else
11072                 *tp_mdix = ETH_TP_MDI;
11073 }
11074
11075 static void hclge_info_show(struct hclge_dev *hdev)
11076 {
11077         struct device *dev = &hdev->pdev->dev;
11078
11079         dev_info(dev, "PF info begin:\n");
11080
11081         dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11082         dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11083         dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11084         dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11085         dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11086         dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11087         dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11088         dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11089         dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11090         dev_info(dev, "This is %s PF\n",
11091                  hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11092         dev_info(dev, "DCB %s\n",
11093                  hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11094         dev_info(dev, "MQPRIO %s\n",
11095                  hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11096         dev_info(dev, "Default tx spare buffer size: %u\n",
11097                  hdev->tx_spare_buf_size);
11098
11099         dev_info(dev, "PF info end.\n");
11100 }
11101
11102 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11103                                           struct hclge_vport *vport)
11104 {
11105         struct hnae3_client *client = vport->nic.client;
11106         struct hclge_dev *hdev = ae_dev->priv;
11107         int rst_cnt = hdev->rst_stats.reset_cnt;
11108         int ret;
11109
11110         ret = client->ops->init_instance(&vport->nic);
11111         if (ret)
11112                 return ret;
11113
11114         set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
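        /* A reset that started or completed while the instance was being
         * initialized may have left it stale, so undo the init and report
         * -EBUSY.
         */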
11115         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11116             rst_cnt != hdev->rst_stats.reset_cnt) {
11117                 ret = -EBUSY;
11118                 goto init_nic_err;
11119         }
11120
11121         /* Enable nic hw error interrupts */
11122         ret = hclge_config_nic_hw_error(hdev, true);
11123         if (ret) {
11124                 dev_err(&ae_dev->pdev->dev,
11125                         "fail(%d) to enable hw error interrupts\n", ret);
11126                 goto init_nic_err;
11127         }
11128
11129         hnae3_set_client_init_flag(client, ae_dev, 1);
11130
11131         if (netif_msg_drv(&hdev->vport->nic))
11132                 hclge_info_show(hdev);
11133
11134         return ret;
11135
11136 init_nic_err:
11137         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11138         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11139                 msleep(HCLGE_WAIT_RESET_DONE);
11140
11141         client->ops->uninit_instance(&vport->nic, 0);
11142
11143         return ret;
11144 }
11145
11146 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11147                                            struct hclge_vport *vport)
11148 {
11149         struct hclge_dev *hdev = ae_dev->priv;
11150         struct hnae3_client *client;
11151         int rst_cnt;
11152         int ret;
11153
11154         if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11155             !hdev->nic_client)
11156                 return 0;
11157
11158         client = hdev->roce_client;
11159         ret = hclge_init_roce_base_info(vport);
11160         if (ret)
11161                 return ret;
11162
11163         rst_cnt = hdev->rst_stats.reset_cnt;
11164         ret = client->ops->init_instance(&vport->roce);
11165         if (ret)
11166                 return ret;
11167
11168         set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11169         if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11170             rst_cnt != hdev->rst_stats.reset_cnt) {
11171                 ret = -EBUSY;
11172                 goto init_roce_err;
11173         }
11174
11175         /* Enable roce ras interrupts */
11176         ret = hclge_config_rocee_ras_interrupt(hdev, true);
11177         if (ret) {
11178                 dev_err(&ae_dev->pdev->dev,
11179                         "fail(%d) to enable roce ras interrupts\n", ret);
11180                 goto init_roce_err;
11181         }
11182
11183         hnae3_set_client_init_flag(client, ae_dev, 1);
11184
11185         return 0;
11186
11187 init_roce_err:
11188         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11189         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11190                 msleep(HCLGE_WAIT_RESET_DONE);
11191
11192         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11193
11194         return ret;
11195 }
11196
11197 static int hclge_init_client_instance(struct hnae3_client *client,
11198                                       struct hnae3_ae_dev *ae_dev)
11199 {
11200         struct hclge_dev *hdev = ae_dev->priv;
11201         struct hclge_vport *vport = &hdev->vport[0];
11202         int ret;
11203
11204         switch (client->type) {
11205         case HNAE3_CLIENT_KNIC:
11206                 hdev->nic_client = client;
11207                 vport->nic.client = client;
11208                 ret = hclge_init_nic_client_instance(ae_dev, vport);
11209                 if (ret)
11210                         goto clear_nic;
11211
11212                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11213                 if (ret)
11214                         goto clear_roce;
11215
11216                 break;
11217         case HNAE3_CLIENT_ROCE:
11218                 if (hnae3_dev_roce_supported(hdev)) {
11219                         hdev->roce_client = client;
11220                         vport->roce.client = client;
11221                 }
11222
11223                 ret = hclge_init_roce_client_instance(ae_dev, vport);
11224                 if (ret)
11225                         goto clear_roce;
11226
11227                 break;
11228         default:
11229                 return -EINVAL;
11230         }
11231
11232         return 0;
11233
11234 clear_nic:
11235         hdev->nic_client = NULL;
11236         vport->nic.client = NULL;
11237         return ret;
11238 clear_roce:
11239         hdev->roce_client = NULL;
11240         vport->roce.client = NULL;
11241         return ret;
11242 }
11243
11244 static void hclge_uninit_client_instance(struct hnae3_client *client,
11245                                          struct hnae3_ae_dev *ae_dev)
11246 {
11247         struct hclge_dev *hdev = ae_dev->priv;
11248         struct hclge_vport *vport = &hdev->vport[0];
11249
11250         if (hdev->roce_client) {
11251                 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
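                /* wait for any reset in progress to finish before
                 * uninitializing the roce client instance
                 */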
11252                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11253                         msleep(HCLGE_WAIT_RESET_DONE);
11254
11255                 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11256                 hdev->roce_client = NULL;
11257                 vport->roce.client = NULL;
11258         }
11259         if (client->type == HNAE3_CLIENT_ROCE)
11260                 return;
11261         if (hdev->nic_client && client->ops->uninit_instance) {
11262                 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11263                 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11264                         msleep(HCLGE_WAIT_RESET_DONE);
11265
11266                 client->ops->uninit_instance(&vport->nic, 0);
11267                 hdev->nic_client = NULL;
11268                 vport->nic.client = NULL;
11269         }
11270 }
11271
11272 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11273 {
11274 #define HCLGE_MEM_BAR           4
11275
11276         struct pci_dev *pdev = hdev->pdev;
11277         struct hclge_hw *hw = &hdev->hw;
11278
11279         /* if the device does not have device memory, return directly */
11280         if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11281                 return 0;
11282
11283         hw->mem_base = devm_ioremap_wc(&pdev->dev,
11284                                        pci_resource_start(pdev, HCLGE_MEM_BAR),
11285                                        pci_resource_len(pdev, HCLGE_MEM_BAR));
11286         if (!hw->mem_base) {
11287                 dev_err(&pdev->dev, "failed to map device memory\n");
11288                 return -EFAULT;
11289         }
11290
11291         return 0;
11292 }
11293
11294 static int hclge_pci_init(struct hclge_dev *hdev)
11295 {
11296         struct pci_dev *pdev = hdev->pdev;
11297         struct hclge_hw *hw;
11298         int ret;
11299
11300         ret = pci_enable_device(pdev);
11301         if (ret) {
11302                 dev_err(&pdev->dev, "failed to enable PCI device\n");
11303                 return ret;
11304         }
11305
11306         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11307         if (ret) {
11308                 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11309                 if (ret) {
11310                         dev_err(&pdev->dev,
11311                                 "can't set consistent PCI DMA\n");
11312                         goto err_disable_device;
11313                 }
11314                 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11315         }
11316
11317         ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11318         if (ret) {
11319                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11320                 goto err_disable_device;
11321         }
11322
11323         pci_set_master(pdev);
11324         hw = &hdev->hw;
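        /* BAR 2 holds the command/configuration register space */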
11325         hw->io_base = pcim_iomap(pdev, 2, 0);
11326         if (!hw->io_base) {
11327                 dev_err(&pdev->dev, "Can't map configuration register space\n");
11328                 ret = -ENOMEM;
11329                 goto err_clr_master;
11330         }
11331
11332         ret = hclge_dev_mem_map(hdev);
11333         if (ret)
11334                 goto err_unmap_io_base;
11335
11336         hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11337
11338         return 0;
11339
11340 err_unmap_io_base:
11341         pcim_iounmap(pdev, hdev->hw.io_base);
11342 err_clr_master:
11343         pci_clear_master(pdev);
11344         pci_release_regions(pdev);
11345 err_disable_device:
11346         pci_disable_device(pdev);
11347
11348         return ret;
11349 }
11350
11351 static void hclge_pci_uninit(struct hclge_dev *hdev)
11352 {
11353         struct pci_dev *pdev = hdev->pdev;
11354
11355         if (hdev->hw.mem_base)
11356                 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11357
11358         pcim_iounmap(pdev, hdev->hw.io_base);
11359         pci_free_irq_vectors(pdev);
11360         pci_clear_master(pdev);
11361         pci_release_mem_regions(pdev);
11362         pci_disable_device(pdev);
11363 }
11364
11365 static void hclge_state_init(struct hclge_dev *hdev)
11366 {
11367         set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11368         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11369         clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11370         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11371         clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11372         clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11373         clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11374 }
11375
11376 static void hclge_state_uninit(struct hclge_dev *hdev)
11377 {
11378         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11379         set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11380
11381         if (hdev->reset_timer.function)
11382                 del_timer_sync(&hdev->reset_timer);
11383         if (hdev->service_task.work.func)
11384                 cancel_delayed_work_sync(&hdev->service_task);
11385 }
11386
11387 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11388                                         enum hnae3_reset_type rst_type)
11389 {
11390 #define HCLGE_RESET_RETRY_WAIT_MS       500
11391 #define HCLGE_RESET_RETRY_CNT   5
11392
11393         struct hclge_dev *hdev = ae_dev->priv;
11394         int retry_cnt = 0;
11395         int ret;
11396
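        /* If the prepare step fails, or a reset is still pending, release the
         * reset semaphore, wait and retry; plain failures are retried at most
         * HCLGE_RESET_RETRY_CNT times.
         */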
11397 retry:
11398         down(&hdev->reset_sem);
11399         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11400         hdev->reset_type = rst_type;
11401         ret = hclge_reset_prepare(hdev);
11402         if (ret || hdev->reset_pending) {
11403                 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11404                         ret);
11405                 if (hdev->reset_pending ||
11406                     retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11407                         dev_err(&hdev->pdev->dev,
11408                                 "reset_pending:0x%lx, retry_cnt:%d\n",
11409                                 hdev->reset_pending, retry_cnt);
11410                         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11411                         up(&hdev->reset_sem);
11412                         msleep(HCLGE_RESET_RETRY_WAIT_MS);
11413                         goto retry;
11414                 }
11415         }
11416
11417         /* disable misc vector before reset done */
11418         hclge_enable_vector(&hdev->misc_vector, false);
11419         set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11420
11421         if (hdev->reset_type == HNAE3_FLR_RESET)
11422                 hdev->rst_stats.flr_rst_cnt++;
11423 }
11424
11425 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11426 {
11427         struct hclge_dev *hdev = ae_dev->priv;
11428         int ret;
11429
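        /* re-enable the misc vector that was disabled in the prepare stage */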
11430         hclge_enable_vector(&hdev->misc_vector, true);
11431
11432         ret = hclge_reset_rebuild(hdev);
11433         if (ret)
11434                 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11435
11436         hdev->reset_type = HNAE3_NONE_RESET;
11437         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11438         up(&hdev->reset_sem);
11439 }
11440
11441 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11442 {
11443         u16 i;
11444
11445         for (i = 0; i < hdev->num_alloc_vport; i++) {
11446                 struct hclge_vport *vport = &hdev->vport[i];
11447                 int ret;
11448
11449                 /* Send cmd to clear VF's FUNC_RST_ING */
11450                 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11451                 if (ret)
11452                         dev_warn(&hdev->pdev->dev,
11453                                  "clear vf(%u) rst failed %d!\n",
11454                                  vport->vport_id, ret);
11455         }
11456 }
11457
11458 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11459 {
11460         struct hclge_desc desc;
11461         int ret;
11462
11463         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11464
11465         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11466         /* This new command is only supported by new firmware and will fail
11467          * with older firmware. The error value -EOPNOTSUPP can only be
11468          * returned by older firmware running this command, so to keep the
11469          * code backward compatible we override this value and return
11470          * success.
11471          */
11472         if (ret && ret != -EOPNOTSUPP) {
11473                 dev_err(&hdev->pdev->dev,
11474                         "failed to clear hw resource, ret = %d\n", ret);
11475                 return ret;
11476         }
11477         return 0;
11478 }
11479
11480 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11481 {
11482         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11483                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11484 }
11485
11486 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11487 {
11488         if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11489                 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11490 }
11491
11492 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11493 {
11494         struct pci_dev *pdev = ae_dev->pdev;
11495         struct hclge_dev *hdev;
11496         int ret;
11497
11498         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11499         if (!hdev)
11500                 return -ENOMEM;
11501
11502         hdev->pdev = pdev;
11503         hdev->ae_dev = ae_dev;
11504         hdev->reset_type = HNAE3_NONE_RESET;
11505         hdev->reset_level = HNAE3_FUNC_RESET;
11506         ae_dev->priv = hdev;
11507
11508         /* HW supports 2-layer VLAN */
11509         hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11510
11511         mutex_init(&hdev->vport_lock);
11512         spin_lock_init(&hdev->fd_rule_lock);
11513         sema_init(&hdev->reset_sem, 1);
11514
11515         ret = hclge_pci_init(hdev);
11516         if (ret)
11517                 goto out;
11518
11519         ret = hclge_devlink_init(hdev);
11520         if (ret)
11521                 goto err_pci_uninit;
11522
11523         /* Firmware command queue initialize */
11524         ret = hclge_cmd_queue_init(hdev);
11525         if (ret)
11526                 goto err_devlink_uninit;
11527
11528         /* Firmware command initialize */
11529         ret = hclge_cmd_init(hdev);
11530         if (ret)
11531         ret = hclge_clear_hw_resource(hdev);
11532
11533         ret  = hclge_clear_hw_resource(hdev);
11534         if (ret)
11535                 goto err_cmd_uninit;
11536
11537         ret = hclge_get_cap(hdev);
11538         if (ret)
11539                 goto err_cmd_uninit;
11540
11541         ret = hclge_query_dev_specs(hdev);
11542         if (ret) {
11543                 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11544                         ret);
11545                 goto err_cmd_uninit;
11546         }
11547
11548         ret = hclge_configure(hdev);
11549         if (ret) {
11550                 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11551                 goto err_cmd_uninit;
11552         }
11553
11554         ret = hclge_init_msi(hdev);
11555         if (ret) {
11556                 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11557                 goto err_cmd_uninit;
11558         }
11559
11560         ret = hclge_misc_irq_init(hdev);
11561         if (ret)
11562                 goto err_msi_uninit;
11563
11564         ret = hclge_alloc_tqps(hdev);
11565         if (ret) {
11566                 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11567                 goto err_msi_irq_uninit;
11568         }
11569
11570         ret = hclge_alloc_vport(hdev);
11571         if (ret)
11572                 goto err_msi_irq_uninit;
11573
11574         ret = hclge_map_tqp(hdev);
11575         if (ret)
11576                 goto err_msi_irq_uninit;
11577
11578         if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11579             !hnae3_dev_phy_imp_supported(hdev)) {
11580                 ret = hclge_mac_mdio_config(hdev);
11581                 if (ret)
11582                         goto err_msi_irq_uninit;
11583         }
11584
11585         ret = hclge_init_umv_space(hdev);
11586         if (ret)
11587                 goto err_mdiobus_unreg;
11588
11589         ret = hclge_mac_init(hdev);
11590         if (ret) {
11591                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11592                 goto err_mdiobus_unreg;
11593         }
11594
11595         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11596         if (ret) {
11597                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11598                 goto err_mdiobus_unreg;
11599         }
11600
11601         ret = hclge_config_gro(hdev);
11602         if (ret)
11603                 goto err_mdiobus_unreg;
11604
11605         ret = hclge_init_vlan_config(hdev);
11606         if (ret) {
11607                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11608                 goto err_mdiobus_unreg;
11609         }
11610
11611         ret = hclge_tm_schd_init(hdev);
11612         if (ret) {
11613                 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11614                 goto err_mdiobus_unreg;
11615         }
11616
11617         ret = hclge_rss_init_cfg(hdev);
11618         if (ret) {
11619                 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11620                 goto err_mdiobus_unreg;
11621         }
11622
11623         ret = hclge_rss_init_hw(hdev);
11624         if (ret) {
11625                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11626                 goto err_mdiobus_unreg;
11627         }
11628
11629         ret = init_mgr_tbl(hdev);
11630         if (ret) {
11631                 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11632                 goto err_mdiobus_unreg;
11633         }
11634
11635         ret = hclge_init_fd_config(hdev);
11636         if (ret) {
11637                 dev_err(&pdev->dev,
11638                         "fd table init fail, ret=%d\n", ret);
11639                 goto err_mdiobus_unreg;
11640         }
11641
11642         ret = hclge_ptp_init(hdev);
11643         if (ret)
11644                 goto err_mdiobus_unreg;
11645
11646         INIT_KFIFO(hdev->mac_tnl_log);
11647
11648         hclge_dcb_ops_set(hdev);
11649
11650         timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11651         INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11652
11653         /* Set up affinity after the service timer is set up, because
11654          * add_timer_on() is called from the affinity notify callback.
11655          */
11656         hclge_misc_affinity_setup(hdev);
11657
11658         hclge_clear_all_event_cause(hdev);
11659         hclge_clear_resetting_state(hdev);
11660
11661         /* Log and clear the hw errors that have already occurred */
11662         if (hnae3_dev_ras_imp_supported(hdev))
11663                 hclge_handle_occurred_error(hdev);
11664         else
11665                 hclge_handle_all_hns_hw_errors(ae_dev);
11666
11667         /* Request a delayed reset for error recovery, because an immediate
11668          * global reset on this PF would affect other PFs' pending initialization.
11669          */
11670         if (ae_dev->hw_err_reset_req) {
11671                 enum hnae3_reset_type reset_level;
11672
11673                 reset_level = hclge_get_reset_level(ae_dev,
11674                                                     &ae_dev->hw_err_reset_req);
11675                 hclge_set_def_reset_request(ae_dev, reset_level);
11676                 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11677         }
11678
11679         hclge_init_rxd_adv_layout(hdev);
11680
11681         /* Enable MISC vector(vector0) */
11682         hclge_enable_vector(&hdev->misc_vector, true);
11683
11684         hclge_state_init(hdev);
11685         hdev->last_reset_time = jiffies;
11686
11687         dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11688                  HCLGE_DRIVER_NAME);
11689
11690         hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11691
11692         return 0;
11693
11694 err_mdiobus_unreg:
11695         if (hdev->hw.mac.phydev)
11696                 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11697 err_msi_irq_uninit:
11698         hclge_misc_irq_uninit(hdev);
11699 err_msi_uninit:
11700         pci_free_irq_vectors(pdev);
11701 err_cmd_uninit:
11702         hclge_cmd_uninit(hdev);
11703 err_devlink_uninit:
11704         hclge_devlink_uninit(hdev);
11705 err_pci_uninit:
11706         pcim_iounmap(pdev, hdev->hw.io_base);
11707         pci_clear_master(pdev);
11708         pci_release_regions(pdev);
11709         pci_disable_device(pdev);
11710 out:
11711         mutex_destroy(&hdev->vport_lock);
11712         return ret;
11713 }
11714
11715 static void hclge_stats_clear(struct hclge_dev *hdev)
11716 {
11717         memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11718 }
11719
11720 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11721 {
11722         return hclge_config_switch_param(hdev, vf, enable,
11723                                          HCLGE_SWITCH_ANTI_SPOOF_MASK);
11724 }
11725
11726 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11727 {
11728         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11729                                           HCLGE_FILTER_FE_NIC_INGRESS_B,
11730                                           enable, vf);
11731 }
11732
11733 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11734 {
11735         int ret;
11736
11737         ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11738         if (ret) {
11739                 dev_err(&hdev->pdev->dev,
11740                         "Set vf %d mac spoof check %s failed, ret=%d\n",
11741                         vf, enable ? "on" : "off", ret);
11742                 return ret;
11743         }
11744
11745         ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11746         if (ret)
11747                 dev_err(&hdev->pdev->dev,
11748                         "Set vf %d vlan spoof check %s failed, ret=%d\n",
11749                         vf, enable ? "on" : "off", ret);
11750
11751         return ret;
11752 }
11753
11754 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11755                                  bool enable)
11756 {
11757         struct hclge_vport *vport = hclge_get_vport(handle);
11758         struct hclge_dev *hdev = vport->back;
11759         u32 new_spoofchk = enable ? 1 : 0;
11760         int ret;
11761
11762         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11763                 return -EOPNOTSUPP;
11764
11765         vport = hclge_get_vf_vport(hdev, vf);
11766         if (!vport)
11767                 return -EINVAL;
11768
11769         if (vport->vf_info.spoofchk == new_spoofchk)
11770                 return 0;
11771
11772         if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11773                 dev_warn(&hdev->pdev->dev,
11774                          "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11775                          vf);
11776         else if (enable && hclge_is_umv_space_full(vport, true))
11777                 dev_warn(&hdev->pdev->dev,
11778                          "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11779                          vf);
11780
11781         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11782         if (ret)
11783                 return ret;
11784
11785         vport->vf_info.spoofchk = new_spoofchk;
11786         return 0;
11787 }
11788
11789 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11790 {
11791         struct hclge_vport *vport = hdev->vport;
11792         int ret;
11793         int i;
11794
11795         if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11796                 return 0;
11797
11798         /* resume the vf spoof check state after reset */
11799         for (i = 0; i < hdev->num_alloc_vport; i++) {
11800                 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11801                                                vport->vf_info.spoofchk);
11802                 if (ret)
11803                         return ret;
11804
11805                 vport++;
11806         }
11807
11808         return 0;
11809 }
11810
11811 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11812 {
11813         struct hclge_vport *vport = hclge_get_vport(handle);
11814         struct hclge_dev *hdev = vport->back;
11815         u32 new_trusted = enable ? 1 : 0;
11816
11817         vport = hclge_get_vf_vport(hdev, vf);
11818         if (!vport)
11819                 return -EINVAL;
11820
11821         if (vport->vf_info.trusted == new_trusted)
11822                 return 0;
11823
11824         vport->vf_info.trusted = new_trusted;
11825         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11826         hclge_task_schedule(hdev, 0);
11827
11828         return 0;
11829 }
11830
11831 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11832 {
11833         int ret;
11834         int vf;
11835
11836         /* reset vf rate to default value */
11837         for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11838                 struct hclge_vport *vport = &hdev->vport[vf];
11839
11840                 vport->vf_info.max_tx_rate = 0;
11841                 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11842                 if (ret)
11843                         dev_err(&hdev->pdev->dev,
11844                                 "vf%d failed to reset to default, ret=%d\n",
11845                                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11846         }
11847 }
11848
11849 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11850                                      int min_tx_rate, int max_tx_rate)
11851 {
11852         if (min_tx_rate != 0 ||
11853             max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11854                 dev_err(&hdev->pdev->dev,
11855                         "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11856                         min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11857                 return -EINVAL;
11858         }
11859
11860         return 0;
11861 }
11862
11863 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11864                              int min_tx_rate, int max_tx_rate, bool force)
11865 {
11866         struct hclge_vport *vport = hclge_get_vport(handle);
11867         struct hclge_dev *hdev = vport->back;
11868         int ret;
11869
11870         ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11871         if (ret)
11872                 return ret;
11873
11874         vport = hclge_get_vf_vport(hdev, vf);
11875         if (!vport)
11876                 return -EINVAL;
11877
11878         if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11879                 return 0;
11880
11881         ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11882         if (ret)
11883                 return ret;
11884
11885         vport->vf_info.max_tx_rate = max_tx_rate;
11886
11887         return 0;
11888 }
11889
11890 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11891 {
11892         struct hnae3_handle *handle = &hdev->vport->nic;
11893         struct hclge_vport *vport;
11894         int ret;
11895         int vf;
11896
11897         /* resume the vf max_tx_rate after reset */
11898         for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11899                 vport = hclge_get_vf_vport(hdev, vf);
11900                 if (!vport)
11901                         return -EINVAL;
11902
11903                 /* zero means max rate; after reset, the firmware has already
11904                  * set it to max rate, so just continue.
11905                  */
11906                 if (!vport->vf_info.max_tx_rate)
11907                         continue;
11908
11909                 ret = hclge_set_vf_rate(handle, vf, 0,
11910                                         vport->vf_info.max_tx_rate, true);
11911                 if (ret) {
11912                         dev_err(&hdev->pdev->dev,
11913                                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11914                                 vf, vport->vf_info.max_tx_rate, ret);
11915                         return ret;
11916                 }
11917         }
11918
11919         return 0;
11920 }
11921
11922 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11923 {
11924         struct hclge_vport *vport = hdev->vport;
11925         int i;
11926
11927         for (i = 0; i < hdev->num_alloc_vport; i++) {
11928                 hclge_vport_stop(vport);
11929                 vport++;
11930         }
11931 }
11932
11933 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11934 {
11935         struct hclge_dev *hdev = ae_dev->priv;
11936         struct pci_dev *pdev = ae_dev->pdev;
11937         int ret;
11938
11939         set_bit(HCLGE_STATE_DOWN, &hdev->state);
11940
11941         hclge_stats_clear(hdev);
11942         /* NOTE: a pf reset does not need to clear or restore the pf and vf
11943          * table entries, so the tables in memory should not be cleaned here.
11944          */
11945         if (hdev->reset_type == HNAE3_IMP_RESET ||
11946             hdev->reset_type == HNAE3_GLOBAL_RESET) {
11947                 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11948                 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11949                 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11950                 hclge_reset_umv_space(hdev);
11951         }
11952
11953         ret = hclge_cmd_init(hdev);
11954         if (ret) {
11955                 dev_err(&pdev->dev, "Cmd queue init failed\n");
11956                 return ret;
11957         }
11958
11959         ret = hclge_map_tqp(hdev);
11960         if (ret) {
11961                 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11962                 return ret;
11963         }
11964
11965         ret = hclge_mac_init(hdev);
11966         if (ret) {
11967                 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11968                 return ret;
11969         }
11970
11971         ret = hclge_tp_port_init(hdev);
11972         if (ret) {
11973                 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11974                         ret);
11975                 return ret;
11976         }
11977
11978         ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11979         if (ret) {
11980                 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11981                 return ret;
11982         }
11983
11984         ret = hclge_config_gro(hdev);
11985         if (ret)
11986                 return ret;
11987
11988         ret = hclge_init_vlan_config(hdev);
11989         if (ret) {
11990                 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11991                 return ret;
11992         }
11993
11994         ret = hclge_tm_init_hw(hdev, true);
11995         if (ret) {
11996                 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11997                 return ret;
11998         }
11999
12000         ret = hclge_rss_init_hw(hdev);
12001         if (ret) {
12002                 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12003                 return ret;
12004         }
12005
12006         ret = init_mgr_tbl(hdev);
12007         if (ret) {
12008                 dev_err(&pdev->dev,
12009                         "failed to reinit manager table, ret = %d\n", ret);
12010                 return ret;
12011         }
12012
12013         ret = hclge_init_fd_config(hdev);
12014         if (ret) {
12015                 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12016                 return ret;
12017         }
12018
12019         ret = hclge_ptp_init(hdev);
12020         if (ret)
12021                 return ret;
12022
12023         /* Log and clear the hw errors that have already occurred */
12024         if (hnae3_dev_ras_imp_supported(hdev))
12025                 hclge_handle_occurred_error(hdev);
12026         else
12027                 hclge_handle_all_hns_hw_errors(ae_dev);
12028
12029         /* Re-enable the hw error interrupts because
12030          * the interrupts get disabled on global reset.
12031          */
12032         ret = hclge_config_nic_hw_error(hdev, true);
12033         if (ret) {
12034                 dev_err(&pdev->dev,
12035                         "fail(%d) to re-enable NIC hw error interrupts\n",
12036                         ret);
12037                 return ret;
12038         }
12039
12040         if (hdev->roce_client) {
12041                 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12042                 if (ret) {
12043                         dev_err(&pdev->dev,
12044                                 "fail(%d) to re-enable roce ras interrupts\n",
12045                                 ret);
12046                         return ret;
12047                 }
12048         }
12049
12050         hclge_reset_vport_state(hdev);
12051         ret = hclge_reset_vport_spoofchk(hdev);
12052         if (ret)
12053                 return ret;
12054
12055         ret = hclge_resume_vf_rate(hdev);
12056         if (ret)
12057                 return ret;
12058
12059         hclge_init_rxd_adv_layout(hdev);
12060
12061         dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12062                  HCLGE_DRIVER_NAME);
12063
12064         return 0;
12065 }
12066
12067 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12068 {
12069         struct hclge_dev *hdev = ae_dev->priv;
12070         struct hclge_mac *mac = &hdev->hw.mac;
12071
12072         hclge_reset_vf_rate(hdev);
12073         hclge_clear_vf_vlan(hdev);
12074         hclge_misc_affinity_teardown(hdev);
12075         hclge_state_uninit(hdev);
12076         hclge_ptp_uninit(hdev);
12077         hclge_uninit_rxd_adv_layout(hdev);
12078         hclge_uninit_mac_table(hdev);
12079         hclge_del_all_fd_entries(hdev);
12080
12081         if (mac->phydev)
12082                 mdiobus_unregister(mac->mdio_bus);
12083
12084         /* Disable MISC vector(vector0) */
12085         hclge_enable_vector(&hdev->misc_vector, false);
12086         synchronize_irq(hdev->misc_vector.vector_irq);
12087
12088         /* Disable all hw interrupts */
12089         hclge_config_mac_tnl_int(hdev, false);
12090         hclge_config_nic_hw_error(hdev, false);
12091         hclge_config_rocee_ras_interrupt(hdev, false);
12092
12093         hclge_cmd_uninit(hdev);
12094         hclge_misc_irq_uninit(hdev);
12095         hclge_devlink_uninit(hdev);
12096         hclge_pci_uninit(hdev);
12097         mutex_destroy(&hdev->vport_lock);
12098         hclge_uninit_vport_vlan_table(hdev);
12099         ae_dev->priv = NULL;
12100 }
12101
12102 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12103 {
12104         struct hclge_vport *vport = hclge_get_vport(handle);
12105         struct hclge_dev *hdev = vport->back;
12106
12107         return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12108 }
12109
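/* These values back the ethtool channel (-l/-L) queries on the PF's netdev. */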
12110 static void hclge_get_channels(struct hnae3_handle *handle,
12111                                struct ethtool_channels *ch)
12112 {
12113         ch->max_combined = hclge_get_max_channels(handle);
12114         ch->other_count = 1;
12115         ch->max_other = 1;
12116         ch->combined_count = handle->kinfo.rss_size;
12117 }
12118
12119 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12120                                         u16 *alloc_tqps, u16 *max_rss_size)
12121 {
12122         struct hclge_vport *vport = hclge_get_vport(handle);
12123         struct hclge_dev *hdev = vport->back;
12124
12125         *alloc_tqps = vport->alloc_tqps;
12126         *max_rss_size = hdev->pf_rss_size_max;
12127 }
12128
12129 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12130                               bool rxfh_configured)
12131 {
12132         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12133         struct hclge_vport *vport = hclge_get_vport(handle);
12134         struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12135         u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12136         struct hclge_dev *hdev = vport->back;
12137         u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12138         u16 cur_rss_size = kinfo->rss_size;
12139         u16 cur_tqps = kinfo->num_tqps;
12140         u16 tc_valid[HCLGE_MAX_TC_NUM];
12141         u16 roundup_size;
12142         u32 *rss_indir;
12143         unsigned int i;
12144         int ret;
12145
12146         kinfo->req_rss_size = new_tqps_num;
12147
12148         ret = hclge_tm_vport_map_update(hdev);
12149         if (ret) {
12150                 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12151                 return ret;
12152         }
12153
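        /* tc_size programmed below is the log2 of the per-TC queue count, so
         * round rss_size up to a power of two before taking its log2.
         */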
12154         roundup_size = roundup_pow_of_two(kinfo->rss_size);
12155         roundup_size = ilog2(roundup_size);
12156         /* Set the RSS TC mode according to the new RSS size */
12157         for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12158                 tc_valid[i] = 0;
12159
12160                 if (!(hdev->hw_tc_map & BIT(i)))
12161                         continue;
12162
12163                 tc_valid[i] = 1;
12164                 tc_size[i] = roundup_size;
12165                 tc_offset[i] = kinfo->rss_size * i;
12166         }
12167         ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12168         if (ret)
12169                 return ret;
12170
12171         /* RSS indirection table has been configured by user */
12172         if (rxfh_configured)
12173                 goto out;
12174
12175         /* Reinitialize the rss indirection table according to the new RSS size */
12176         rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12177                             GFP_KERNEL);
12178         if (!rss_indir)
12179                 return -ENOMEM;
12180
12181         for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12182                 rss_indir[i] = i % kinfo->rss_size;
12183
12184         ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12185         if (ret)
12186                 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12187                         ret);
12188
12189         kfree(rss_indir);
12190
12191 out:
12192         if (!ret)
12193                 dev_info(&hdev->pdev->dev,
12194                          "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
12195                          cur_rss_size, kinfo->rss_size,
12196                          cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12197
12198         return ret;
12199 }
12200
12201 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12202                               u32 *regs_num_64_bit)
12203 {
12204         struct hclge_desc desc;
12205         u32 total_num;
12206         int ret;
12207
12208         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12209         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12210         if (ret) {
12211                 dev_err(&hdev->pdev->dev,
12212                         "Query register number cmd failed, ret = %d.\n", ret);
12213                 return ret;
12214         }
12215
12216         *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12217         *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12218
12219         total_num = *regs_num_32_bit + *regs_num_64_bit;
12220         if (!total_num)
12221                 return -EINVAL;
12222
12223         return 0;
12224 }
12225
12226 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12227                                  void *data)
12228 {
12229 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12230 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12231
12232         struct hclge_desc *desc;
12233         u32 *reg_val = data;
12234         __le32 *desc_data;
12235         int nodata_num;
12236         int cmd_num;
12237         int i, k, n;
12238         int ret;
12239
12240         if (regs_num == 0)
12241                 return 0;
12242
12243         nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12244         cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12245                                HCLGE_32_BIT_REG_RTN_DATANUM);
12246         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12247         if (!desc)
12248                 return -ENOMEM;
12249
12250         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12251         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12252         if (ret) {
12253                 dev_err(&hdev->pdev->dev,
12254                         "Query 32 bit register cmd failed, ret = %d.\n", ret);
12255                 kfree(desc);
12256                 return ret;
12257         }
12258
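        /* The first BD's header takes the space of HCLGE_32_BIT_DESC_NODATA_LEN
         * register values, so it returns fewer values; the following BDs are
         * filled entirely with register data.
         */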
12259         for (i = 0; i < cmd_num; i++) {
12260                 if (i == 0) {
12261                         desc_data = (__le32 *)(&desc[i].data[0]);
12262                         n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12263                 } else {
12264                         desc_data = (__le32 *)(&desc[i]);
12265                         n = HCLGE_32_BIT_REG_RTN_DATANUM;
12266                 }
12267                 for (k = 0; k < n; k++) {
12268                         *reg_val++ = le32_to_cpu(*desc_data++);
12269
12270                         regs_num--;
12271                         if (!regs_num)
12272                                 break;
12273                 }
12274         }
12275
12276         kfree(desc);
12277         return 0;
12278 }
12279
12280 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12281                                  void *data)
12282 {
12283 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12284 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12285
12286         struct hclge_desc *desc;
12287         u64 *reg_val = data;
12288         __le64 *desc_data;
12289         int nodata_len;
12290         int cmd_num;
12291         int i, k, n;
12292         int ret;
12293
12294         if (regs_num == 0)
12295                 return 0;
12296
12297         nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12298         cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12299                                HCLGE_64_BIT_REG_RTN_DATANUM);
12300         desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12301         if (!desc)
12302                 return -ENOMEM;
12303
12304         hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12305         ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12306         if (ret) {
12307                 dev_err(&hdev->pdev->dev,
12308                         "Query 64 bit register cmd failed, ret = %d.\n", ret);
12309                 kfree(desc);
12310                 return ret;
12311         }
12312
12313         for (i = 0; i < cmd_num; i++) {
12314                 if (i == 0) {
12315                         desc_data = (__le64 *)(&desc[i].data[0]);
12316                         n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12317                 } else {
12318                         desc_data = (__le64 *)(&desc[i]);
12319                         n = HCLGE_64_BIT_REG_RTN_DATANUM;
12320                 }
12321                 for (k = 0; k < n; k++) {
12322                         *reg_val++ = le64_to_cpu(*desc_data++);
12323
12324                         regs_num--;
12325                         if (!regs_num)
12326                                 break;
12327                 }
12328         }
12329
12330         kfree(desc);
12331         return 0;
12332 }
12333
12334 #define MAX_SEPARATE_NUM        4
12335 #define SEPARATOR_VALUE         0xFDFCFBFA
12336 #define REG_NUM_PER_LINE        4
12337 #define REG_LEN_PER_LINE        (REG_NUM_PER_LINE * sizeof(u32))
12338 #define REG_SEPARATOR_LINE      1
12339 #define REG_NUM_REMAIN_MASK     3
12340
12341 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12342 {
12343         int i;
12344
12345         /* initialize all command BDs except the last one */
12346         for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12347                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12348                                            true);
12349                 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12350         }
12351
12352         /* initialize the last command BD */
12353         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12354
12355         return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12356 }
12357
12358 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12359                                     int *bd_num_list,
12360                                     u32 type_num)
12361 {
12362         u32 entries_per_desc, desc_index, index, offset, i;
12363         struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12364         int ret;
12365
12366         ret = hclge_query_bd_num_cmd_send(hdev, desc);
12367         if (ret) {
12368                 dev_err(&hdev->pdev->dev,
12369                         "Get dfx bd num fail, status is %d.\n", ret);
12370                 return ret;
12371         }
12372
12373         entries_per_desc = ARRAY_SIZE(desc[0].data);
12374         for (i = 0; i < type_num; i++) {
12375                 offset = hclge_dfx_bd_offset_list[i];
12376                 index = offset % entries_per_desc;
12377                 desc_index = offset / entries_per_desc;
12378                 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12379         }
12380
12381         return ret;
12382 }
12383
12384 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12385                                   struct hclge_desc *desc_src, int bd_num,
12386                                   enum hclge_opcode_type cmd)
12387 {
12388         struct hclge_desc *desc = desc_src;
12389         int i, ret;
12390
12391         hclge_cmd_setup_basic_desc(desc, cmd, true);
12392         for (i = 0; i < bd_num - 1; i++) {
12393                 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12394                 desc++;
12395                 hclge_cmd_setup_basic_desc(desc, cmd, true);
12396         }
12397
12398         desc = desc_src;
12399         ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12400         if (ret)
12401                 dev_err(&hdev->pdev->dev,
12402                         "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12403                         cmd, ret);
12404
12405         return ret;
12406 }
12407
12408 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12409                                     void *data)
12410 {
12411         int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12412         struct hclge_desc *desc = desc_src;
12413         u32 *reg = data;
12414
12415         entries_per_desc = ARRAY_SIZE(desc->data);
12416         reg_num = entries_per_desc * bd_num;
12417         separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12418         for (i = 0; i < reg_num; i++) {
12419                 index = i % entries_per_desc;
12420                 desc_index = i / entries_per_desc;
12421                 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12422         }
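        /* append SEPARATOR_VALUE entries after the register data to mark the
         * end of this block and keep line alignment
         */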
12423         for (i = 0; i < separator_num; i++)
12424                 *reg++ = SEPARATOR_VALUE;
12425
12426         return reg_num + separator_num;
12427 }
12428
12429 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12430 {
12431         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12432         int data_len_per_desc, bd_num, i;
12433         int *bd_num_list;
12434         u32 data_len;
12435         int ret;
12436
12437         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12438         if (!bd_num_list)
12439                 return -ENOMEM;
12440
12441         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12442         if (ret) {
12443                 dev_err(&hdev->pdev->dev,
12444                         "Get dfx reg bd num fail, status is %d.\n", ret);
12445                 goto out;
12446         }
12447
12448         data_len_per_desc = sizeof_field(struct hclge_desc, data);
12449         *len = 0;
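        /* reserve whole REG_LEN_PER_LINE lines for each register type,
         * including room for its separator padding
         */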
12450         for (i = 0; i < dfx_reg_type_num; i++) {
12451                 bd_num = bd_num_list[i];
12452                 data_len = data_len_per_desc * bd_num;
12453                 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12454         }
12455
12456 out:
12457         kfree(bd_num_list);
12458         return ret;
12459 }
12460
12461 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12462 {
12463         u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12464         int bd_num, bd_num_max, buf_len, i;
12465         struct hclge_desc *desc_src;
12466         int *bd_num_list;
12467         u32 *reg = data;
12468         int ret;
12469
12470         bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12471         if (!bd_num_list)
12472                 return -ENOMEM;
12473
12474         ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12475         if (ret) {
12476                 dev_err(&hdev->pdev->dev,
12477                         "Get dfx reg bd num fail, status is %d.\n", ret);
12478                 goto out;
12479         }
12480
12481         bd_num_max = bd_num_list[0];
12482         for (i = 1; i < dfx_reg_type_num; i++)
12483                 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12484
12485         buf_len = sizeof(*desc_src) * bd_num_max;
12486         desc_src = kzalloc(buf_len, GFP_KERNEL);
12487         if (!desc_src) {
12488                 ret = -ENOMEM;
12489                 goto out;
12490         }
12491
12492         for (i = 0; i < dfx_reg_type_num; i++) {
12493                 bd_num = bd_num_list[i];
12494                 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12495                                              hclge_dfx_reg_opcode_list[i]);
12496                 if (ret) {
12497                         dev_err(&hdev->pdev->dev,
12498                                 "Get dfx reg fail, status is %d.\n", ret);
12499                         break;
12500                 }
12501
12502                 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12503         }
12504
12505         kfree(desc_src);
12506 out:
12507         kfree(bd_num_list);
12508         return ret;
12509 }
12510
12511 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12512                               struct hnae3_knic_private_info *kinfo)
12513 {
12514 #define HCLGE_RING_REG_OFFSET           0x200
12515 #define HCLGE_RING_INT_REG_OFFSET       0x4
12516
12517         int i, j, reg_num, separator_num;
12518         int data_num_sum;
12519         u32 *reg = data;
12520
12521         /* fetch per-PF register values from the PF PCIe register space */
12522         reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12523         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12524         for (i = 0; i < reg_num; i++)
12525                 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12526         for (i = 0; i < separator_num; i++)
12527                 *reg++ = SEPARATOR_VALUE;
12528         data_num_sum = reg_num + separator_num;
12529
12530         reg_num = ARRAY_SIZE(common_reg_addr_list);
12531         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12532         for (i = 0; i < reg_num; i++)
12533                 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12534         for (i = 0; i < separator_num; i++)
12535                 *reg++ = SEPARATOR_VALUE;
12536         data_num_sum += reg_num + separator_num;
12537
12538         reg_num = ARRAY_SIZE(ring_reg_addr_list);
12539         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
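        /* ring registers: each TQP's register block is HCLGE_RING_REG_OFFSET
         * bytes after the previous one
         */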
12540         for (j = 0; j < kinfo->num_tqps; j++) {
12541                 for (i = 0; i < reg_num; i++)
12542                         *reg++ = hclge_read_dev(&hdev->hw,
12543                                                 ring_reg_addr_list[i] +
12544                                                 HCLGE_RING_REG_OFFSET * j);
12545                 for (i = 0; i < separator_num; i++)
12546                         *reg++ = SEPARATOR_VALUE;
12547         }
12548         data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12549
12550         reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12551         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12552         for (j = 0; j < hdev->num_msi_used - 1; j++) {
12553                 for (i = 0; i < reg_num; i++)
12554                         *reg++ = hclge_read_dev(&hdev->hw,
12555                                                 tqp_intr_reg_addr_list[i] +
12556                                                 HCLGE_RING_INT_REG_OFFSET * j);
12557                 for (i = 0; i < separator_num; i++)
12558                         *reg++ = SEPARATOR_VALUE;
12559         }
12560         data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12561
12562         return data_num_sum;
12563 }
12564
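/* Compute the buffer size (in bytes) needed for the register dump: the fixed
 * cmdq/common blocks, the per-TQP ring block, the per-vector interrupt block,
 * the 32-bit and 64-bit register ranges reported by firmware, plus the DFX
 * register length.
 */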
12565 static int hclge_get_regs_len(struct hnae3_handle *handle)
12566 {
12567         int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12568         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12569         struct hclge_vport *vport = hclge_get_vport(handle);
12570         struct hclge_dev *hdev = vport->back;
12571         int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12572         int regs_lines_32_bit, regs_lines_64_bit;
12573         int ret;
12574
12575         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12576         if (ret) {
12577                 dev_err(&hdev->pdev->dev,
12578                         "Get register number failed, ret = %d.\n", ret);
12579                 return ret;
12580         }
12581
12582         ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12583         if (ret) {
12584                 dev_err(&hdev->pdev->dev,
12585                         "Get dfx reg len failed, ret = %d.\n", ret);
12586                 return ret;
12587         }
12588
12589         cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12590                 REG_SEPARATOR_LINE;
12591         common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12592                 REG_SEPARATOR_LINE;
12593         ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12594                 REG_SEPARATOR_LINE;
12595         tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12596                 REG_SEPARATOR_LINE;
12597         regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12598                 REG_SEPARATOR_LINE;
12599         regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12600                 REG_SEPARATOR_LINE;
12601
12602         return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12603                 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12604                 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12605 }
12606
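/* Fill the register dump: report the firmware version, then copy the PF PCIe
 * registers, the firmware-provided 32-bit and 64-bit register ranges (each
 * padded with separator words) and finally the DFX registers.
 */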
12607 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12608                            void *data)
12609 {
12610         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12611         struct hclge_vport *vport = hclge_get_vport(handle);
12612         struct hclge_dev *hdev = vport->back;
12613         u32 regs_num_32_bit, regs_num_64_bit;
12614         int i, reg_num, separator_num, ret;
12615         u32 *reg = data;
12616
12617         *version = hdev->fw_version;
12618
12619         ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12620         if (ret) {
12621                 dev_err(&hdev->pdev->dev,
12622                         "Get register number failed, ret = %d.\n", ret);
12623                 return;
12624         }
12625
12626         reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12627
12628         ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12629         if (ret) {
12630                 dev_err(&hdev->pdev->dev,
12631                         "Get 32 bit register failed, ret = %d.\n", ret);
12632                 return;
12633         }
12634         reg_num = regs_num_32_bit;
12635         reg += reg_num;
12636         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12637         for (i = 0; i < separator_num; i++)
12638                 *reg++ = SEPARATOR_VALUE;
12639
12640         ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12641         if (ret) {
12642                 dev_err(&hdev->pdev->dev,
12643                         "Get 64 bit register failed, ret = %d.\n", ret);
12644                 return;
12645         }
12646         reg_num = regs_num_64_bit * 2;
12647         reg += reg_num;
12648         separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12649         for (i = 0; i < separator_num; i++)
12650                 *reg++ = SEPARATOR_VALUE;
12651
12652         ret = hclge_get_dfx_reg(hdev, reg);
12653         if (ret)
12654                 dev_err(&hdev->pdev->dev,
12655                         "Get dfx register failed, ret = %d.\n", ret);
12656 }
12657
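/* Program the locate-LED state via the HCLGE_OPC_LED_STATUS_CFG command. */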
12658 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12659 {
12660         struct hclge_set_led_state_cmd *req;
12661         struct hclge_desc desc;
12662         int ret;
12663
12664         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12665
12666         req = (struct hclge_set_led_state_cmd *)desc.data;
12667         hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12668                         HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12669
12670         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12671         if (ret)
12672                 dev_err(&hdev->pdev->dev,
12673                         "Send set led state cmd error, ret = %d\n", ret);
12674
12675         return ret;
12676 }
12677
12678 enum hclge_led_status {
12679         HCLGE_LED_OFF,
12680         HCLGE_LED_ON,
12681         HCLGE_LED_NO_CHANGE = 0xFF,
12682 };
12683
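/* Handle the ethtool physical identification request by switching the locate
 * LED on or off; other identification states are not supported.
 */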
12684 static int hclge_set_led_id(struct hnae3_handle *handle,
12685                             enum ethtool_phys_id_state status)
12686 {
12687         struct hclge_vport *vport = hclge_get_vport(handle);
12688         struct hclge_dev *hdev = vport->back;
12689
12690         switch (status) {
12691         case ETHTOOL_ID_ACTIVE:
12692                 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12693         case ETHTOOL_ID_INACTIVE:
12694                 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12695         default:
12696                 return -EINVAL;
12697         }
12698 }
12699
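/* Copy the MAC's supported and advertising link-mode bitmaps to the caller. */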
12700 static void hclge_get_link_mode(struct hnae3_handle *handle,
12701                                 unsigned long *supported,
12702                                 unsigned long *advertising)
12703 {
12704         unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12705         struct hclge_vport *vport = hclge_get_vport(handle);
12706         struct hclge_dev *hdev = vport->back;
12707         unsigned int idx = 0;
12708
12709         for (; idx < size; idx++) {
12710                 supported[idx] = hdev->hw.mac.supported[idx];
12711                 advertising[idx] = hdev->hw.mac.advertising[idx];
12712         }
12713 }
12714
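/* Enable or disable hardware GRO; restore the previous setting if the
 * firmware command fails.
 */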
12715 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12716 {
12717         struct hclge_vport *vport = hclge_get_vport(handle);
12718         struct hclge_dev *hdev = vport->back;
12719         bool gro_en_old = hdev->gro_en;
12720         int ret;
12721
12722         hdev->gro_en = enable;
12723         ret = hclge_config_gro(hdev);
12724         if (ret)
12725                 hdev->gro_en = gro_en_old;
12726
12727         return ret;
12728 }
12729
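/* Sync the promiscuous configuration: re-apply the PF promiscuous mode when
 * the requested or overflow flags have changed, then refresh the
 * unicast/multicast/broadcast promiscuous settings of each VF vport,
 * honouring unicast/multicast requests only for trusted VFs.
 */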
12730 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12731 {
12732         struct hclge_vport *vport = &hdev->vport[0];
12733         struct hnae3_handle *handle = &vport->nic;
12734         u8 tmp_flags;
12735         int ret;
12736         u16 i;
12737
12738         if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12739                 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12740                 vport->last_promisc_flags = vport->overflow_promisc_flags;
12741         }
12742
12743         if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12744                 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12745                 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12746                                              tmp_flags & HNAE3_MPE);
12747                 if (!ret) {
12748                         clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12749                                   &vport->state);
12750                         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12751                                 &vport->state);
12752                 }
12753         }
12754
12755         for (i = 1; i < hdev->num_alloc_vport; i++) {
12756                 bool uc_en = false;
12757                 bool mc_en = false;
12758                 bool bc_en;
12759
12760                 vport = &hdev->vport[i];
12761
12762                 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12763                                         &vport->state))
12764                         continue;
12765
12766                 if (vport->vf_info.trusted) {
12767                         uc_en = vport->vf_info.request_uc_en > 0;
12768                         mc_en = vport->vf_info.request_mc_en > 0;
12769                 }
12770                 bc_en = vport->vf_info.request_bc_en > 0;
12771
12772                 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12773                                                  mc_en, bc_en);
12774                 if (ret) {
12775                         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12776                                 &vport->state);
12777                         return;
12778                 }
12779                 hclge_set_vport_vlan_fltr_change(vport);
12780         }
12781 }
12782
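/* Query the firmware whether an SFP module is currently present. */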
12783 static bool hclge_module_existed(struct hclge_dev *hdev)
12784 {
12785         struct hclge_desc desc;
12786         u32 existed;
12787         int ret;
12788
12789         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12790         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12791         if (ret) {
12792                 dev_err(&hdev->pdev->dev,
12793                         "failed to get SFP exist state, ret = %d\n", ret);
12794                 return false;
12795         }
12796
12797         existed = le32_to_cpu(desc.data[0]);
12798
12799         return existed != 0;
12800 }
12801
12802 /* need 6 BDs (140 bytes in total) for one read,
12803  * return the number of bytes actually read, 0 means the read failed.
12804  */
12805 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12806                                      u32 len, u8 *data)
12807 {
12808         struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12809         struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12810         u16 read_len;
12811         u16 copy_len;
12812         int ret;
12813         int i;
12814
12815         /* setup all 6 bds to read module eeprom info. */
12816         for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12817                 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12818                                            true);
12819
12820                 /* bd0~bd4 need next flag */
12821                 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12822                         desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12823         }
12824
12825         /* setup bd0, this bd contains offset and read length. */
12826         sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12827         sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12828         read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12829         sfp_info_bd0->read_len = cpu_to_le16(read_len);
12830
12831         ret = hclge_cmd_send(&hdev->hw, desc, i);
12832         if (ret) {
12833                 dev_err(&hdev->pdev->dev,
12834                         "failed to get SFP eeprom info, ret = %d\n", ret);
12835                 return 0;
12836         }
12837
12838         /* copy sfp info from bd0 to out buffer. */
12839         copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12840         memcpy(data, sfp_info_bd0->data, copy_len);
12841         read_len = copy_len;
12842
12843         /* copy sfp info from bd1~bd5 to out buffer if needed. */
12844         for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12845                 if (read_len >= len)
12846                         return read_len;
12847
12848                 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12849                 memcpy(data + read_len, desc[i].data, copy_len);
12850                 read_len += copy_len;
12851         }
12852
12853         return read_len;
12854 }
12855
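/* Read module EEPROM contents: only valid for fibre ports with a module
 * present; reads are issued in chunks of at most HCLGE_SFP_INFO_MAX_LEN
 * bytes until @len bytes have been copied.
 */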
12856 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12857                                    u32 len, u8 *data)
12858 {
12859         struct hclge_vport *vport = hclge_get_vport(handle);
12860         struct hclge_dev *hdev = vport->back;
12861         u32 read_len = 0;
12862         u16 data_len;
12863
12864         if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12865                 return -EOPNOTSUPP;
12866
12867         if (!hclge_module_existed(hdev))
12868                 return -ENXIO;
12869
12870         while (read_len < len) {
12871                 data_len = hclge_get_sfp_eeprom_info(hdev,
12872                                                      offset + read_len,
12873                                                      len - read_len,
12874                                                      data + read_len);
12875                 if (!data_len)
12876                         return -EIO;
12877
12878                 read_len += data_len;
12879         }
12880
12881         return 0;
12882 }
12883
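/* Query the link diagnosis status code from firmware; only supported on
 * device versions newer than V2.
 */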
12884 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12885                                          u32 *status_code)
12886 {
12887         struct hclge_vport *vport = hclge_get_vport(handle);
12888         struct hclge_dev *hdev = vport->back;
12889         struct hclge_desc desc;
12890         int ret;
12891
12892         if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12893                 return -EOPNOTSUPP;
12894
12895         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12896         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12897         if (ret) {
12898                 dev_err(&hdev->pdev->dev,
12899                         "failed to query link diagnosis info, ret = %d\n", ret);
12900                 return ret;
12901         }
12902
12903         *status_code = le32_to_cpu(desc.data[0]);
12904         return 0;
12905 }
12906
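/* hnae3 operations implemented by the PF driver, registered below via ae_algo. */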
12907 static const struct hnae3_ae_ops hclge_ops = {
12908         .init_ae_dev = hclge_init_ae_dev,
12909         .uninit_ae_dev = hclge_uninit_ae_dev,
12910         .reset_prepare = hclge_reset_prepare_general,
12911         .reset_done = hclge_reset_done,
12912         .init_client_instance = hclge_init_client_instance,
12913         .uninit_client_instance = hclge_uninit_client_instance,
12914         .map_ring_to_vector = hclge_map_ring_to_vector,
12915         .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12916         .get_vector = hclge_get_vector,
12917         .put_vector = hclge_put_vector,
12918         .set_promisc_mode = hclge_set_promisc_mode,
12919         .request_update_promisc_mode = hclge_request_update_promisc_mode,
12920         .set_loopback = hclge_set_loopback,
12921         .start = hclge_ae_start,
12922         .stop = hclge_ae_stop,
12923         .client_start = hclge_client_start,
12924         .client_stop = hclge_client_stop,
12925         .get_status = hclge_get_status,
12926         .get_ksettings_an_result = hclge_get_ksettings_an_result,
12927         .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12928         .get_media_type = hclge_get_media_type,
12929         .check_port_speed = hclge_check_port_speed,
12930         .get_fec = hclge_get_fec,
12931         .set_fec = hclge_set_fec,
12932         .get_rss_key_size = hclge_get_rss_key_size,
12933         .get_rss = hclge_get_rss,
12934         .set_rss = hclge_set_rss,
12935         .set_rss_tuple = hclge_set_rss_tuple,
12936         .get_rss_tuple = hclge_get_rss_tuple,
12937         .get_tc_size = hclge_get_tc_size,
12938         .get_mac_addr = hclge_get_mac_addr,
12939         .set_mac_addr = hclge_set_mac_addr,
12940         .do_ioctl = hclge_do_ioctl,
12941         .add_uc_addr = hclge_add_uc_addr,
12942         .rm_uc_addr = hclge_rm_uc_addr,
12943         .add_mc_addr = hclge_add_mc_addr,
12944         .rm_mc_addr = hclge_rm_mc_addr,
12945         .set_autoneg = hclge_set_autoneg,
12946         .get_autoneg = hclge_get_autoneg,
12947         .restart_autoneg = hclge_restart_autoneg,
12948         .halt_autoneg = hclge_halt_autoneg,
12949         .get_pauseparam = hclge_get_pauseparam,
12950         .set_pauseparam = hclge_set_pauseparam,
12951         .set_mtu = hclge_set_mtu,
12952         .reset_queue = hclge_reset_tqp,
12953         .get_stats = hclge_get_stats,
12954         .get_mac_stats = hclge_get_mac_stat,
12955         .update_stats = hclge_update_stats,
12956         .get_strings = hclge_get_strings,
12957         .get_sset_count = hclge_get_sset_count,
12958         .get_fw_version = hclge_get_fw_version,
12959         .get_mdix_mode = hclge_get_mdix_mode,
12960         .enable_vlan_filter = hclge_enable_vlan_filter,
12961         .set_vlan_filter = hclge_set_vlan_filter,
12962         .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12963         .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12964         .reset_event = hclge_reset_event,
12965         .get_reset_level = hclge_get_reset_level,
12966         .set_default_reset_request = hclge_set_def_reset_request,
12967         .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12968         .set_channels = hclge_set_channels,
12969         .get_channels = hclge_get_channels,
12970         .get_regs_len = hclge_get_regs_len,
12971         .get_regs = hclge_get_regs,
12972         .set_led_id = hclge_set_led_id,
12973         .get_link_mode = hclge_get_link_mode,
12974         .add_fd_entry = hclge_add_fd_entry,
12975         .del_fd_entry = hclge_del_fd_entry,
12976         .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12977         .get_fd_rule_info = hclge_get_fd_rule_info,
12978         .get_fd_all_rules = hclge_get_all_rules,
12979         .enable_fd = hclge_enable_fd,
12980         .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12981         .dbg_read_cmd = hclge_dbg_read_cmd,
12982         .handle_hw_ras_error = hclge_handle_hw_ras_error,
12983         .get_hw_reset_stat = hclge_get_hw_reset_stat,
12984         .ae_dev_resetting = hclge_ae_dev_resetting,
12985         .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12986         .set_gro_en = hclge_gro_en,
12987         .get_global_queue_id = hclge_covert_handle_qid_global,
12988         .set_timer_task = hclge_set_timer_task,
12989         .mac_connect_phy = hclge_mac_connect_phy,
12990         .mac_disconnect_phy = hclge_mac_disconnect_phy,
12991         .get_vf_config = hclge_get_vf_config,
12992         .set_vf_link_state = hclge_set_vf_link_state,
12993         .set_vf_spoofchk = hclge_set_vf_spoofchk,
12994         .set_vf_trust = hclge_set_vf_trust,
12995         .set_vf_rate = hclge_set_vf_rate,
12996         .set_vf_mac = hclge_set_vf_mac,
12997         .get_module_eeprom = hclge_get_module_eeprom,
12998         .get_cmdq_stat = hclge_get_cmdq_stat,
12999         .add_cls_flower = hclge_add_cls_flower,
13000         .del_cls_flower = hclge_del_cls_flower,
13001         .cls_flower_active = hclge_is_cls_flower_active,
13002         .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13003         .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13004         .set_tx_hwts_info = hclge_ptp_set_tx_info,
13005         .get_rx_hwts = hclge_ptp_get_rx_hwts,
13006         .get_ts_info = hclge_ptp_get_ts_info,
13007         .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13008 };
13009
13010 static struct hnae3_ae_algo ae_algo = {
13011         .ops = &hclge_ops,
13012         .pdev_id_table = ae_algo_pci_tbl,
13013 };
13014
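/* Module entry: create the driver workqueue and register the algorithm with
 * the hnae3 framework.
 */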
13015 static int hclge_init(void)
13016 {
13017         pr_info("%s is initializing\n", HCLGE_NAME);
13018
13019         hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13020         if (!hclge_wq) {
13021                 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13022                 return -ENOMEM;
13023         }
13024
13025         hnae3_register_ae_algo(&ae_algo);
13026
13027         return 0;
13028 }
13029
13030 static void hclge_exit(void)
13031 {
13032         hnae3_unregister_ae_algo(&ae_algo);
13033         destroy_workqueue(hclge_wq);
13034 }
13035 module_init(hclge_init);
13036 module_exit(hclge_exit);
13037
13038 MODULE_LICENSE("GPL");
13039 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13040 MODULE_DESCRIPTION("HCLGE Driver");
13041 MODULE_VERSION(HCLGE_MOD_VERSION);