// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"

#define HCLGE_NAME			"hclge"
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
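
/* Example (sketch, not driver code): the two macros above pair with the
 * g_mac_stats_string[] table below so that every ethtool statistic can be
 * read generically out of hdev->mac_stats by byte offset:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 */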

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;

static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_NIC_CSQ_DEPTH_REG,
					 HCLGE_NIC_CSQ_TAIL_REG,
					 HCLGE_NIC_CSQ_HEAD_REG,
					 HCLGE_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_NIC_CRQ_DEPTH_REG,
					 HCLGE_NIC_CRQ_TAIL_REG,
					 HCLGE_NIC_CRQ_HEAD_REG,
					 HCLGE_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_CMDQ_INTR_STS_REG,
					 HCLGE_CMDQ_INTR_EN_REG,
					 HCLGE_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_PF_OTHER_INT_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING};

static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};

static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};

static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_tx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num",
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
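
/* Default Toeplitz RSS hash key below, as published in the Microsoft RSS
 * specification; the same 40-byte key is used as the default by a number
 * of other NIC drivers.
 */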
static const u8 hclge_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};

static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};

static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_TAG_NUM, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
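
/* Each tuple_key_info[] entry above reads { tuple id, key width in bits,
 * key encode option, offset of the tuple value inside struct hclge_fd_rule,
 * offset of its mask }. An offset of -1 marks a tuple that has no
 * corresponding field in the rule (all OUTER_* tuples, for instance).
 */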

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	int i, k, n;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);

		return ret;
	}

	for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
		/* for special opcode 0032, only the first desc has the head */
		if (unlikely(i == 0)) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	return 0;
}
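
/* Layout note for the loop above: descriptor 0 still carries the command
 * head, so it holds fewer 64-bit counters (HCLGE_RD_FIRST_STATS_NUM) than
 * the remaining descriptors, which are packed entirely with counters
 * (HCLGE_RD_OTHER_STATS_NUM each).
 */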

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 i, k, n;
	int ret;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	for (i = 0; i < desc_num; i++) {
		/* for special opcode 0034, only the first desc has the head */
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_RD_FIRST_STATS_NUM;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_RD_OTHER_STATS_NUM;
		}

		for (k = 0; k < n; k++) {
			*data += le64_to_cpu(*desc_data);
			data++;
			desc_data++;
		}
	}

	kfree(desc);

	return 0;
}

static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
	struct hclge_desc desc;
	__le32 *desc_data;
	u32 reg_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	desc_data = (__le32 *)(&desc.data[0]);
	reg_num = le32_to_cpu(*desc_data);

	*desc_num = 1 + ((reg_num - 3) >> 2) +
		    (u32)(((reg_num - 3) & 0x3) ? 1 : 0);

	return 0;
}
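
/* Worked example for the descriptor-count formula above: the first
 * descriptor carries the command head plus 3 of the 64-bit stats, and each
 * following descriptor carries 4 more, so desc_num = 1 + ceil((reg_num - 3)
 * / 4). With reg_num = 21 that is 1 + (18 >> 2) + 1 = 6 descriptors.
 */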

static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	u32 desc_num;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &desc_num);
	/* The firmware supports the new statistics acquisition method */
	if (!ret)
		ret = hclge_mac_update_stats_complete(hdev, desc_num);
	else if (ret == -EOPNOTSUPP)
		ret = hclge_mac_update_stats_defective(hdev);
	else
		dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");

	return ret;
}

static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hnae3_queue *queue;
	struct hclge_desc desc[1];
	struct hclge_tqp *tqp;
	int ret, i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		queue = handle->kinfo.tqp[i];
		tqp = container_of(queue, struct hclge_tqp, q);
		/* command : HCLGE_OPC_QUERY_IGU_STAT */
		hclge_cmd_setup_basic_desc(&desc[0],
					   HCLGE_OPC_QUERY_TX_STATS,
					   true);

		desc[0].data[0] = cpu_to_le32(tqp->index);
		ret = hclge_cmd_send(&hdev->hw, desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				ret, i);
			return ret;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc[0].data[1]);
	}

	return 0;
}

static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	/* each tqp has both a TX and an RX queue */
	return kinfo->num_tqps * 2;
}

static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
						     struct hclge_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
			 tqp->index);
		buff = buff + ETH_GSTRING_LEN;
	}

	return buff;
}

static u64 *hclge_comm_get_stats(const void *comm_stats,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++)
		buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);

	return buf + size;
}

static u8 *hclge_comm_get_strings(u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}

static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_tqps_update_stats(handle);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}

static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only supported in GE mode
	 * serdes: supported in all mac modes (GE/XGE/LGE/CGE)
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 2;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = ARRAY_SIZE(g_mac_stats_string) +
			hclge_tqps_get_sset_count(handle, stringset);
	}

	return count;
}

static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(stringset, g_mac_stats_string,
					   size, p);
		p = hclge_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}

static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_tqps_get_stats(handle, p);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}

static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}

static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			mac->supported);
}

static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 mac->supported);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 mac->supported);
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
		mac->fec_ability =
			BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
			BIT(HNAE3_FEC_AUTO);
		break;
	case HCLGE_MAC_SPEED_100G:
	case HCLGE_MAC_SPEED_200G:
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}
}

static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(mac, speed_ability);
	hclge_convert_setting_lr(mac, speed_ability);
	hclge_convert_setting_cr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(mac, speed_ability);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}

static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT		4096
#define SPEED_ABILITY_EXT_SHIFT			8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
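
	/* Note: the MAC address arrives in two le32 config words: param[2]
	 * carries the low 32 bits and a 16-bit field of param[3] the high
	 * bits. Splitting the shift as (x << 31) << 1 rather than x << 32
	 * keeps the expression well-defined even for a 32-bit operand.
	 */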

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);
	if (!cfg->umv_space)
		cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the log2 of the PF max rss size
	 * rather than the size itself, which is more flexible for future
	 * changes and expansions. A PF field of 0 is meaningless; in that
	 * case the PF falls back to the VF max rss size field,
	 * HCLGE_CFG_RSS_SIZE_S, so PF and VF share the same maximum.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;
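
	/* Example: a pf_rss_size_max field of 7 therefore yields
	 * 1U << 7 = 128 as the PF's maximum rss size.
	 */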

	/* The unit of the tx spare buffer size queried from the configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
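
/* Example (field value assumed): a tx spare buffer size field of 2 read
 * from the configuration file means 2 * 4096 = 8192 bytes of tx spare
 * buffer.
 */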

/* hclge_get_cfg: query the static parameter from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Length must be expressed in units of 4 bytes when sent to
		 * hardware.
		 */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}

static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}

static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}

static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	const struct cpumask *cpumask = cpu_online_mask;
	struct hclge_cfg cfg;
	unsigned int i;
	int node, ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	hdev->wanted_umv_size = cfg.umv_space;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_dev_fd_supported(hdev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	if ((hdev->tc_max > HNAE3_MAX_TC) ||
	    (hdev->tc_max < 1)) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;

	hclge_init_kdump_kernel_config(hdev);

	/* Set the affinity based on numa node */
	node = dev_to_node(&hdev->pdev->dev);
	if (node != NUMA_NO_NODE)
		cpumask = cpumask_of_node(node);

	cpumask_copy(&hdev->affinity_mask, cpumask);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hclge_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}
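
/* Example (HCLGE_TQP_MAX_SIZE_DEV_V2 assumed to be 1024): queue 0 sits at
 * io_base + HCLGE_TQP_REG_OFFSET, while queue 1025 is mapped at io_base +
 * HCLGE_TQP_REG_OFFSET + HCLGE_TQP_EXT_REG_OFFSET + 1 * HCLGE_TQP_REG_SIZE.
 */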

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irqs and queues by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
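
/* Worked example for the clamping above (all values assumed): with
 * pf_rss_size_max = 64, alloc_tqps = 16 and num_tc = 4, rss_size is first
 * min(64, 16 / 4) = 4; with num_nic_msi = 9 it is then further clamped to
 * min(4, (9 - 1) / 4) = 2 so that each queue can keep its own vector.
 */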

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->kinfo.io_base = hdev->hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is expressed in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}
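
	/* Example (values assumed): a 32768-byte TC buffer is encoded as
	 * (32768 >> 7) | BIT(15) = 0x8100, i.e. 256 128-byte units with
	 * the update-enable bit set.
	 */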

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}

static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);

	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	unsigned int i;
	u32 cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			cnt++;
	return cnt;
}

/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
				     struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	unsigned int i;
	int cnt = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (hdev->hw_tc_map & BIT(i) &&
		    !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
		    priv->enable)
			cnt++;
	}

	return cnt;
}

static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	struct hclge_priv_buf *priv;
	u32 rx_priv = 0;
	int i;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		priv = &buf_alloc->priv_buf[i];
		if (priv->enable)
			rx_priv += priv->buf_size;
	}
	return rx_priv;
}

static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 i, total_tx_size = 0;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;

	return total_tx_size;
}

static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
			       struct hclge_pkt_buf_alloc *buf_alloc,
			       u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}
2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2072 struct hclge_pkt_buf_alloc *buf_alloc)
2076 total_size = hdev->pkt_buf_size;
2078 /* alloc tx buffer for all enabled tc */
2079 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2080 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2082 if (hdev->hw_tc_map & BIT(i)) {
2083 if (total_size < hdev->tx_buf_size)
2086 priv->tx_buf_size = hdev->tx_buf_size;
2088 priv->tx_buf_size = 0;
2091 total_size -= priv->tx_buf_size;
2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2098 struct hclge_pkt_buf_alloc *buf_alloc)
2100 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2101 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2104 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2105 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2112 if (!(hdev->hw_tc_map & BIT(i)))
2117 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2118 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2119 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2120 HCLGE_BUF_SIZE_UNIT);
2123 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2127 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2130 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2134 struct hclge_pkt_buf_alloc *buf_alloc)
2136 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2140 /* let the last TC be cleared first */
2141 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2142 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2143 unsigned int mask = BIT((unsigned int)i);
2145 if (hdev->hw_tc_map & mask &&
2146 !(hdev->tm_info.hw_pfc_map & mask)) {
2147 /* Clear the private buffer of this non-PFC TC */
2155 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2156 no_pfc_priv_num == 0)
2160 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2164 struct hclge_pkt_buf_alloc *buf_alloc)
2166 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2167 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2170 /* let the last TC be cleared first */
2171 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2172 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2173 unsigned int mask = BIT((unsigned int)i);
2175 if (hdev->hw_tc_map & mask &&
2176 hdev->tm_info.hw_pfc_map & mask) {
2177 /* Reduce the number of PFC TCs with a private buffer */
2185 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2190 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
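/* Fallback strategy: give every enabled TC an equal, purely private RX
 * buffer slice and no shared buffer at all. This only succeeds if each
 * slice still covers the minimum of dv buffer + compensation buffer +
 * COMPENSATE_HALF_MPS_NUM half-MPS units.
 */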
2193 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2194 struct hclge_pkt_buf_alloc *buf_alloc)
2196 #define COMPENSATE_BUFFER 0x3C00
2197 #define COMPENSATE_HALF_MPS_NUM 5
2198 #define PRIV_WL_GAP 0x1800
2200 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2201 u32 tc_num = hclge_get_tc_num(hdev);
2202 u32 half_mps = hdev->mps >> 1;
2207 rx_priv = rx_priv / tc_num;
2209 if (tc_num <= NEED_RESERVE_TC_NUM)
2210 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2212 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2213 COMPENSATE_HALF_MPS_NUM * half_mps;
2214 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2215 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2216 if (rx_priv < min_rx_priv)
2219 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2220 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2227 if (!(hdev->hw_tc_map & BIT(i)))
2231 priv->buf_size = rx_priv;
2232 priv->wl.high = rx_priv - hdev->dv_buf_size;
2233 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2236 buf_alloc->s_buf.buf_size = 0;
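/* The RX calculation below tries the following strategies in order and
 * keeps the first one that fits into the remaining packet buffer:
 * 1. equal private buffers only, with no shared buffer
 * 2. private buffers with maximum waterlines
 * 3. private buffers with minimum waterlines
 * 4. additionally drop the private buffers of non-PFC TCs
 * 5. additionally drop the private buffers of PFC TCs
 */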
2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2242 * @hdev: pointer to struct hclge_dev
2243 * @buf_alloc: pointer to buffer calculation data
2244 * @return: 0 on successful calculation, negative errno on failure
2245 */
2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2247 struct hclge_pkt_buf_alloc *buf_alloc)
2249 /* When DCB is not supported, rx private buffer is not allocated. */
2250 if (!hnae3_dev_dcb_supported(hdev)) {
2251 u32 rx_all = hdev->pkt_buf_size;
2253 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2254 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2260 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2263 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2266 /* try to decrease the buffer size */
2267 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2270 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2273 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2280 struct hclge_pkt_buf_alloc *buf_alloc)
2282 struct hclge_rx_priv_buff_cmd *req;
2283 struct hclge_desc desc;
2287 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2288 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2290 /* Alloc private buffer for each TC */
2291 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2292 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2295 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2297 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2301 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2302 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2304 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2306 dev_err(&hdev->pdev->dev,
2307 "rx private buffer alloc cmd failed %d\n", ret);
2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2313 struct hclge_pkt_buf_alloc *buf_alloc)
2315 struct hclge_rx_priv_wl_buf *req;
2316 struct hclge_priv_buf *priv;
2317 struct hclge_desc desc[2];
2321 for (i = 0; i < 2; i++) {
2322 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2324 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2326 /* The first descriptor sets the NEXT bit to 1 */
2328 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2330 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2332 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2335 priv = &buf_alloc->priv_buf[idx];
2336 req->tc_wl[j].high =
2337 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2338 req->tc_wl[j].high |=
2339 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2341 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2342 req->tc_wl[j].low |=
2343 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2347 /* Send 2 descriptors at one time */
2348 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2350 dev_err(&hdev->pdev->dev,
2351 "rx private waterline config cmd failed %d\n",
2356 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2357 struct hclge_pkt_buf_alloc *buf_alloc)
2359 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2360 struct hclge_rx_com_thrd *req;
2361 struct hclge_desc desc[2];
2362 struct hclge_tc_thrd *tc;
2366 for (i = 0; i < 2; i++) {
2367 hclge_cmd_setup_basic_desc(&desc[i],
2368 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2369 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2371 /* The first descriptor sets the NEXT bit to 1 */
2373 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2375 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2377 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2378 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2380 req->com_thrd[j].high =
2381 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2382 req->com_thrd[j].high |=
2383 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2384 req->com_thrd[j].low =
2385 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2386 req->com_thrd[j].low |=
2387 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2391 /* Send 2 descriptors at one time */
2392 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2394 dev_err(&hdev->pdev->dev,
2395 "common threshold config cmd failed %d\n", ret);
2399 static int hclge_common_wl_config(struct hclge_dev *hdev,
2400 struct hclge_pkt_buf_alloc *buf_alloc)
2402 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2403 struct hclge_rx_com_wl *req;
2404 struct hclge_desc desc;
2407 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2409 req = (struct hclge_rx_com_wl *)desc.data;
2410 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2411 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2413 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2414 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2416 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2418 dev_err(&hdev->pdev->dev,
2419 "common waterline config cmd failed %d\n", ret);
2424 int hclge_buffer_alloc(struct hclge_dev *hdev)
2426 struct hclge_pkt_buf_alloc *pkt_buf;
2429 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2433 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2435 dev_err(&hdev->pdev->dev,
2436 "could not calc tx buffer size for all TCs %d\n", ret);
2440 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2442 dev_err(&hdev->pdev->dev,
2443 "could not alloc tx buffers %d\n", ret);
2447 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2449 dev_err(&hdev->pdev->dev,
2450 "could not calc rx priv buffer size for all TCs %d\n",
2455 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2457 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2462 if (hnae3_dev_dcb_supported(hdev)) {
2463 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2465 dev_err(&hdev->pdev->dev,
2466 "could not configure rx private waterline %d\n",
2471 ret = hclge_common_thrd_config(hdev, pkt_buf);
2473 dev_err(&hdev->pdev->dev,
2474 "could not configure common threshold %d\n",
2480 ret = hclge_common_wl_config(hdev, pkt_buf);
2482 dev_err(&hdev->pdev->dev,
2483 "could not configure common waterline %d\n", ret);
2490 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2492 struct hnae3_handle *roce = &vport->roce;
2493 struct hnae3_handle *nic = &vport->nic;
2494 struct hclge_dev *hdev = vport->back;
2496 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2498 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2501 roce->rinfo.base_vector = hdev->roce_base_vector;
2503 roce->rinfo.netdev = nic->kinfo.netdev;
2504 roce->rinfo.roce_io_base = hdev->hw.io_base;
2505 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2507 roce->pdev = nic->pdev;
2508 roce->ae_algo = nic->ae_algo;
2509 roce->numa_node_mask = nic->numa_node_mask;
2514 static int hclge_init_msi(struct hclge_dev *hdev)
2516 struct pci_dev *pdev = hdev->pdev;
2520 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2522 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2525 "failed(%d) to allocate MSI/MSI-X vectors\n",
2529 if (vectors < hdev->num_msi)
2530 dev_warn(&hdev->pdev->dev,
2531 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2532 hdev->num_msi, vectors);
2534 hdev->num_msi = vectors;
2535 hdev->num_msi_left = vectors;
2537 hdev->base_msi_vector = pdev->irq;
2538 hdev->roce_base_vector = hdev->base_msi_vector +
2541 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2542 sizeof(u16), GFP_KERNEL);
2543 if (!hdev->vector_status) {
2544 pci_free_irq_vectors(pdev);
2548 for (i = 0; i < hdev->num_msi; i++)
2549 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2551 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2552 sizeof(int), GFP_KERNEL);
2553 if (!hdev->vector_irq) {
2554 pci_free_irq_vectors(pdev);
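/* Half duplex is only meaningful at 10M/100M; any higher speed is forced
 * to full duplex.
 */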
2561 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2563 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2564 duplex = HCLGE_MAC_FULL;
2569 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2572 struct hclge_config_mac_speed_dup_cmd *req;
2573 struct hclge_desc desc;
2576 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2578 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2581 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2584 case HCLGE_MAC_SPEED_10M:
2585 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2586 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2588 case HCLGE_MAC_SPEED_100M:
2589 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2590 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2592 case HCLGE_MAC_SPEED_1G:
2593 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2594 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2596 case HCLGE_MAC_SPEED_10G:
2597 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2598 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2600 case HCLGE_MAC_SPEED_25G:
2601 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2602 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2604 case HCLGE_MAC_SPEED_40G:
2605 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2606 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2608 case HCLGE_MAC_SPEED_50G:
2609 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2610 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2612 case HCLGE_MAC_SPEED_100G:
2613 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2614 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2616 case HCLGE_MAC_SPEED_200G:
2617 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2618 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2621 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2625 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2630 dev_err(&hdev->pdev->dev,
2631 "mac speed/duplex config cmd failed %d.\n", ret);
2638 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2640 struct hclge_mac *mac = &hdev->hw.mac;
2643 duplex = hclge_check_speed_dup(duplex, speed);
2644 if (!mac->support_autoneg && mac->speed == speed &&
2645 mac->duplex == duplex)
2648 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2652 hdev->hw.mac.speed = speed;
2653 hdev->hw.mac.duplex = duplex;
2658 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2661 struct hclge_vport *vport = hclge_get_vport(handle);
2662 struct hclge_dev *hdev = vport->back;
2664 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2667 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2669 struct hclge_config_auto_neg_cmd *req;
2670 struct hclge_desc desc;
2674 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2676 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2678 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2679 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2681 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2683 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2689 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2691 struct hclge_vport *vport = hclge_get_vport(handle);
2692 struct hclge_dev *hdev = vport->back;
2694 if (!hdev->hw.mac.support_autoneg) {
2696 dev_err(&hdev->pdev->dev,
2697 "autoneg is not supported by current port\n");
2704 return hclge_set_autoneg_en(hdev, enable);
2707 static int hclge_get_autoneg(struct hnae3_handle *handle)
2709 struct hclge_vport *vport = hclge_get_vport(handle);
2710 struct hclge_dev *hdev = vport->back;
2711 struct phy_device *phydev = hdev->hw.mac.phydev;
2714 return phydev->autoneg;
2716 return hdev->hw.mac.autoneg;
2719 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2721 struct hclge_vport *vport = hclge_get_vport(handle);
2722 struct hclge_dev *hdev = vport->back;
2725 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2727 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2730 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2733 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2735 struct hclge_vport *vport = hclge_get_vport(handle);
2736 struct hclge_dev *hdev = vport->back;
2738 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2739 return hclge_set_autoneg_en(hdev, !halt);
2744 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2746 struct hclge_config_fec_cmd *req;
2747 struct hclge_desc desc;
2750 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2752 req = (struct hclge_config_fec_cmd *)desc.data;
2753 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2754 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2755 if (fec_mode & BIT(HNAE3_FEC_RS))
2756 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2757 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2758 if (fec_mode & BIT(HNAE3_FEC_BASER))
2759 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2760 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2762 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2764 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2769 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2771 struct hclge_vport *vport = hclge_get_vport(handle);
2772 struct hclge_dev *hdev = vport->back;
2773 struct hclge_mac *mac = &hdev->hw.mac;
2776 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2777 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2781 ret = hclge_set_fec_hw(hdev, fec_mode);
2785 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2789 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2792 struct hclge_vport *vport = hclge_get_vport(handle);
2793 struct hclge_dev *hdev = vport->back;
2794 struct hclge_mac *mac = &hdev->hw.mac;
2797 *fec_ability = mac->fec_ability;
2799 *fec_mode = mac->fec_mode;
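/* Bring the MAC to a known state: program speed/duplex, autoneg and FEC
 * from the stored settings, set the MTU and the default loopback, and
 * finally (re)allocate the packet buffers.
 */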
2802 static int hclge_mac_init(struct hclge_dev *hdev)
2804 struct hclge_mac *mac = &hdev->hw.mac;
2807 hdev->support_sfp_query = true;
2808 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2809 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2810 hdev->hw.mac.duplex);
2814 if (hdev->hw.mac.support_autoneg) {
2815 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2822 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2823 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2828 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2830 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2834 ret = hclge_set_default_loopback(hdev);
2838 ret = hclge_buffer_alloc(hdev);
2840 dev_err(&hdev->pdev->dev,
2841 "allocate buffer fail, ret=%d\n", ret);
2846 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2848 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2849 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2850 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2851 hclge_wq, &hdev->service_task, 0);
2854 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2856 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2857 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2858 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2859 hclge_wq, &hdev->service_task, 0);
2862 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2864 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2865 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2866 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2867 hclge_wq, &hdev->service_task, 0);
2870 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2872 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2873 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2874 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2875 hclge_wq, &hdev->service_task,
2879 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2881 struct hclge_link_status_cmd *req;
2882 struct hclge_desc desc;
2885 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2886 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2888 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2893 req = (struct hclge_link_status_cmd *)desc.data;
2894 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2895 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2900 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2902 struct phy_device *phydev = hdev->hw.mac.phydev;
2904 *link_status = HCLGE_LINK_STATUS_DOWN;
2906 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2909 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2912 return hclge_get_mac_link_status(hdev, link_status);
2915 static void hclge_push_link_status(struct hclge_dev *hdev)
2917 struct hclge_vport *vport;
2921 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2922 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2924 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2925 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2928 ret = hclge_push_vf_link_status(vport);
2930 dev_err(&hdev->pdev->dev,
2931 "failed to push link status to vf%u, ret = %d\n",
2937 static void hclge_update_link_status(struct hclge_dev *hdev)
2939 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2940 struct hnae3_handle *handle = &hdev->vport[0].nic;
2941 struct hnae3_client *rclient = hdev->roce_client;
2942 struct hnae3_client *client = hdev->nic_client;
2949 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2952 ret = hclge_get_mac_phy_link(hdev, &state);
2954 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2958 if (state != hdev->hw.mac.link) {
2959 hdev->hw.mac.link = state;
2960 client->ops->link_status_change(handle, state);
2961 hclge_config_mac_tnl_int(hdev, state);
2962 if (rclient && rclient->ops->link_status_change)
2963 rclient->ops->link_status_change(rhandle, state);
2965 hclge_push_link_status(hdev);
2968 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2971 static void hclge_update_port_capability(struct hclge_dev *hdev,
2972 struct hclge_mac *mac)
2974 if (hnae3_dev_fec_supported(hdev))
2975 /* update fec ability by speed */
2976 hclge_convert_setting_fec(mac);
2978 /* firmware cannot identify the backplane type; the media type
2979 * read from the configuration can help to deal with it
2980 */
2981 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2982 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2983 mac->module_type = HNAE3_MODULE_TYPE_KR;
2984 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2985 mac->module_type = HNAE3_MODULE_TYPE_TP;
2987 if (mac->support_autoneg) {
2988 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2989 linkmode_copy(mac->advertising, mac->supported);
2991 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2993 linkmode_zero(mac->advertising);
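/* Note: old firmware only reports the raw SFP speed, while device
 * version V2 and later returns the full module info (speed ability,
 * autoneg and active FEC) through the QUERY_ACTIVE_SPEED query.
 */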
2997 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2999 struct hclge_sfp_info_cmd *resp;
3000 struct hclge_desc desc;
3003 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3004 resp = (struct hclge_sfp_info_cmd *)desc.data;
3005 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3006 if (ret == -EOPNOTSUPP) {
3007 dev_warn(&hdev->pdev->dev,
3008 "IMP do not support get SFP speed %d\n", ret);
3011 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3015 *speed = le32_to_cpu(resp->speed);
3020 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3022 struct hclge_sfp_info_cmd *resp;
3023 struct hclge_desc desc;
3026 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3027 resp = (struct hclge_sfp_info_cmd *)desc.data;
3029 resp->query_type = QUERY_ACTIVE_SPEED;
3031 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3032 if (ret == -EOPNOTSUPP) {
3033 dev_warn(&hdev->pdev->dev,
3034 "IMP does not support get SFP info %d\n", ret);
3037 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3041 /* In some cases, the mac speed obtained from the IMP may be 0; it
3042 * should not be assigned to mac->speed.
3043 */
3044 if (!le32_to_cpu(resp->speed))
3047 mac->speed = le32_to_cpu(resp->speed);
3048 /* if resp->speed_ability is 0, it means the firmware is an old
3049 * version that does not report these params, so do not update them
3050 */
3051 if (resp->speed_ability) {
3052 mac->module_type = le32_to_cpu(resp->module_type);
3053 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3054 mac->autoneg = resp->autoneg;
3055 mac->support_autoneg = resp->autoneg_ability;
3056 mac->speed_type = QUERY_ACTIVE_SPEED;
3057 if (!resp->active_fec)
3060 mac->fec_mode = BIT(resp->active_fec);
3062 mac->speed_type = QUERY_SFP_SPEED;
3068 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3069 struct ethtool_link_ksettings *cmd)
3071 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3072 struct hclge_vport *vport = hclge_get_vport(handle);
3073 struct hclge_phy_link_ksetting_0_cmd *req0;
3074 struct hclge_phy_link_ksetting_1_cmd *req1;
3075 u32 supported, advertising, lp_advertising;
3076 struct hclge_dev *hdev = vport->back;
3079 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3081 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3082 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3085 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3087 dev_err(&hdev->pdev->dev,
3088 "failed to get phy link ksetting, ret = %d.\n", ret);
3092 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3093 cmd->base.autoneg = req0->autoneg;
3094 cmd->base.speed = le32_to_cpu(req0->speed);
3095 cmd->base.duplex = req0->duplex;
3096 cmd->base.port = req0->port;
3097 cmd->base.transceiver = req0->transceiver;
3098 cmd->base.phy_address = req0->phy_address;
3099 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3100 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3101 supported = le32_to_cpu(req0->supported);
3102 advertising = le32_to_cpu(req0->advertising);
3103 lp_advertising = le32_to_cpu(req0->lp_advertising);
3104 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3106 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3108 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3111 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3112 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3113 cmd->base.master_slave_state = req1->master_slave_state;
3119 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3120 const struct ethtool_link_ksettings *cmd)
3122 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3123 struct hclge_vport *vport = hclge_get_vport(handle);
3124 struct hclge_phy_link_ksetting_0_cmd *req0;
3125 struct hclge_phy_link_ksetting_1_cmd *req1;
3126 struct hclge_dev *hdev = vport->back;
3130 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3131 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3132 (cmd->base.duplex != DUPLEX_HALF &&
3133 cmd->base.duplex != DUPLEX_FULL)))
3136 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3138 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3139 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3142 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3143 req0->autoneg = cmd->base.autoneg;
3144 req0->speed = cpu_to_le32(cmd->base.speed);
3145 req0->duplex = cmd->base.duplex;
3146 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3147 cmd->link_modes.advertising);
3148 req0->advertising = cpu_to_le32(advertising);
3149 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3151 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3152 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3154 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3156 dev_err(&hdev->pdev->dev,
3157 "failed to set phy link ksettings, ret = %d.\n", ret);
3161 hdev->hw.mac.autoneg = cmd->base.autoneg;
3162 hdev->hw.mac.speed = cmd->base.speed;
3163 hdev->hw.mac.duplex = cmd->base.duplex;
3164 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3169 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3171 struct ethtool_link_ksettings cmd;
3174 if (!hnae3_dev_phy_imp_supported(hdev))
3177 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3181 hdev->hw.mac.autoneg = cmd.base.autoneg;
3182 hdev->hw.mac.speed = cmd.base.speed;
3183 hdev->hw.mac.duplex = cmd.base.duplex;
3188 static int hclge_tp_port_init(struct hclge_dev *hdev)
3190 struct ethtool_link_ksettings cmd;
3192 if (!hnae3_dev_phy_imp_supported(hdev))
3195 cmd.base.autoneg = hdev->hw.mac.autoneg;
3196 cmd.base.speed = hdev->hw.mac.speed;
3197 cmd.base.duplex = hdev->hw.mac.duplex;
3198 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3200 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3203 static int hclge_update_port_info(struct hclge_dev *hdev)
3205 struct hclge_mac *mac = &hdev->hw.mac;
3206 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3209 /* get the port info from SFP cmd if not copper port */
3210 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3211 return hclge_update_tp_port_info(hdev);
3213 /* if IMP does not support getting SFP/qSFP info, return directly */
3214 if (!hdev->support_sfp_query)
3217 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3218 ret = hclge_get_sfp_info(hdev, mac);
3220 ret = hclge_get_sfp_speed(hdev, &speed);
3222 if (ret == -EOPNOTSUPP) {
3223 hdev->support_sfp_query = false;
3229 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3230 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3231 hclge_update_port_capability(hdev, mac);
3234 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3237 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3238 return 0; /* do nothing if no SFP */
3240 /* must config full duplex for SFP */
3241 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3245 static int hclge_get_status(struct hnae3_handle *handle)
3247 struct hclge_vport *vport = hclge_get_vport(handle);
3248 struct hclge_dev *hdev = vport->back;
3250 hclge_update_link_status(hdev);
3252 return hdev->hw.mac.link;
3255 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3257 if (!pci_num_vf(hdev->pdev)) {
3258 dev_err(&hdev->pdev->dev,
3259 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3263 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3264 dev_err(&hdev->pdev->dev,
3265 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3266 vf, pci_num_vf(hdev->pdev));
3270 /* VFs start from index 1 in the vport array */
3271 vf += HCLGE_VF_VPORT_START_NUM;
3272 return &hdev->vport[vf];
3275 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3276 struct ifla_vf_info *ivf)
3278 struct hclge_vport *vport = hclge_get_vport(handle);
3279 struct hclge_dev *hdev = vport->back;
3281 vport = hclge_get_vf_vport(hdev, vf);
3286 ivf->linkstate = vport->vf_info.link_state;
3287 ivf->spoofchk = vport->vf_info.spoofchk;
3288 ivf->trusted = vport->vf_info.trusted;
3289 ivf->min_tx_rate = 0;
3290 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3291 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3292 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3293 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3294 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3299 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3302 struct hclge_vport *vport = hclge_get_vport(handle);
3303 struct hclge_dev *hdev = vport->back;
3307 vport = hclge_get_vf_vport(hdev, vf);
3311 link_state_old = vport->vf_info.link_state;
3312 vport->vf_info.link_state = link_state;
3314 ret = hclge_push_vf_link_status(vport);
3316 vport->vf_info.link_state = link_state_old;
3317 dev_err(&hdev->pdev->dev,
3318 "failed to push vf%d link status, ret = %d\n", vf, ret);
3324 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3326 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3328 /* fetch the events from their corresponding regs */
3329 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3330 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3331 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3332 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3334 /* Assumption: If by any chance reset and mailbox events are reported
3335 * together then we will only process reset event in this go and will
3336 * defer the processing of the mailbox events. Since we will not have
3337 * cleared the RX CMDQ event this time, we will receive another
3338 * interrupt from the hardware just for the mailbox.
3339 *
3340 * check for vector0 reset event sources
3341 */
3342 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3343 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3344 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3345 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3346 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3347 hdev->rst_stats.imp_rst_cnt++;
3348 return HCLGE_VECTOR0_EVENT_RST;
3351 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3352 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3353 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3354 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3355 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3356 hdev->rst_stats.global_rst_cnt++;
3357 return HCLGE_VECTOR0_EVENT_RST;
3360 /* check for vector0 msix event and hardware error event source */
3361 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3362 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3363 return HCLGE_VECTOR0_EVENT_ERR;
3365 /* check for vector0 ptp event source */
3366 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3367 *clearval = msix_src_reg;
3368 return HCLGE_VECTOR0_EVENT_PTP;
3371 /* check for vector0 mailbox(=CMDQ RX) event source */
3372 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3373 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3374 *clearval = cmdq_src_reg;
3375 return HCLGE_VECTOR0_EVENT_MBX;
3378 /* print other vector0 event source */
3379 dev_info(&hdev->pdev->dev,
3380 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3381 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3383 return HCLGE_VECTOR0_EVENT_OTHER;
3386 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3389 switch (event_type) {
3390 case HCLGE_VECTOR0_EVENT_PTP:
3391 case HCLGE_VECTOR0_EVENT_RST:
3392 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3394 case HCLGE_VECTOR0_EVENT_MBX:
3395 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3402 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3404 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3405 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3406 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3407 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3408 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3411 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3413 writel(enable ? 1 : 0, vector->addr);
3416 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3418 struct hclge_dev *hdev = data;
3419 unsigned long flags;
3423 hclge_enable_vector(&hdev->misc_vector, false);
3424 event_cause = hclge_check_event_cause(hdev, &clearval);
3426 /* vector 0 interrupt is shared with reset and mailbox source events. */
3427 switch (event_cause) {
3428 case HCLGE_VECTOR0_EVENT_ERR:
3429 hclge_errhand_task_schedule(hdev);
3431 case HCLGE_VECTOR0_EVENT_RST:
3432 hclge_reset_task_schedule(hdev);
3434 case HCLGE_VECTOR0_EVENT_PTP:
3435 spin_lock_irqsave(&hdev->ptp->lock, flags);
3436 hclge_ptp_clean_tx_hwts(hdev);
3437 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3439 case HCLGE_VECTOR0_EVENT_MBX:
3440 /* If we are here then,
3441 * 1. Either we are not handling any mbx task and we are not
3442 *    scheduled as well
3443 *    OR
3444 * 2. We could be handling a mbx task but nothing more is
3445 *    scheduled.
3446 * In both cases, we should schedule the mbx task as there are more
3447 * mbx messages reported by this interrupt.
3448 */
3449 hclge_mbx_task_schedule(hdev);
3452 dev_warn(&hdev->pdev->dev,
3453 "received unknown or unhandled event of vector0\n");
3457 hclge_clear_event_cause(hdev, event_cause, clearval);
3459 /* Enable interrupt if it is not caused by reset event or error event */
3460 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3461 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3462 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3463 hclge_enable_vector(&hdev->misc_vector, true);
3468 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3470 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3471 dev_warn(&hdev->pdev->dev,
3472 "vector(vector_id %d) has been freed.\n", vector_id);
3476 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3477 hdev->num_msi_left += 1;
3478 hdev->num_msi_used -= 1;
3481 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3483 struct hclge_misc_vector *vector = &hdev->misc_vector;
3485 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3487 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3488 hdev->vector_status[0] = 0;
3490 hdev->num_msi_left -= 1;
3491 hdev->num_msi_used += 1;
3494 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3495 const cpumask_t *mask)
3497 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3500 cpumask_copy(&hdev->affinity_mask, mask);
3503 static void hclge_irq_affinity_release(struct kref *ref)
3507 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3509 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3510 &hdev->affinity_mask);
3512 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3513 hdev->affinity_notify.release = hclge_irq_affinity_release;
3514 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3515 &hdev->affinity_notify);
3518 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3520 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3521 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3524 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3528 hclge_get_misc_vector(hdev);
3530 /* this would be explicitly freed in the end */
3531 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3532 HCLGE_NAME, pci_name(hdev->pdev));
3533 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3534 0, hdev->misc_vector.name, hdev);
3536 hclge_free_vector(hdev, 0);
3537 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3538 hdev->misc_vector.vector_irq);
3544 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3546 free_irq(hdev->misc_vector.vector_irq, hdev);
3547 hclge_free_vector(hdev, 0);
3550 int hclge_notify_client(struct hclge_dev *hdev,
3551 enum hnae3_reset_notify_type type)
3553 struct hnae3_handle *handle = &hdev->vport[0].nic;
3554 struct hnae3_client *client = hdev->nic_client;
3557 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3560 if (!client->ops->reset_notify)
3563 ret = client->ops->reset_notify(handle, type);
3565 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3571 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3572 enum hnae3_reset_notify_type type)
3574 struct hnae3_handle *handle = &hdev->vport[0].roce;
3575 struct hnae3_client *client = hdev->roce_client;
3578 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3581 if (!client->ops->reset_notify)
3584 ret = client->ops->reset_notify(handle, type);
3586 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
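/* Poll the reset status register every HCLGE_RESET_WAIT_MS (100 ms) up
 * to HCLGE_RESET_WAIT_CNT (350) times, i.e. wait at most 35 seconds for
 * the hardware to finish the reset.
 */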
3592 static int hclge_reset_wait(struct hclge_dev *hdev)
3594 #define HCLGE_RESET_WAIT_MS 100
3595 #define HCLGE_RESET_WAIT_CNT 350
3597 u32 val, reg, reg_bit;
3600 switch (hdev->reset_type) {
3601 case HNAE3_IMP_RESET:
3602 reg = HCLGE_GLOBAL_RESET_REG;
3603 reg_bit = HCLGE_IMP_RESET_BIT;
3605 case HNAE3_GLOBAL_RESET:
3606 reg = HCLGE_GLOBAL_RESET_REG;
3607 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3609 case HNAE3_FUNC_RESET:
3610 reg = HCLGE_FUN_RST_ING;
3611 reg_bit = HCLGE_FUN_RST_ING_B;
3614 dev_err(&hdev->pdev->dev,
3615 "Wait for unsupported reset type: %d\n",
3620 val = hclge_read_dev(&hdev->hw, reg);
3621 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3622 msleep(HCLGE_RESET_WAIT_MS);
3623 val = hclge_read_dev(&hdev->hw, reg);
3627 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3628 dev_warn(&hdev->pdev->dev,
3629 "Wait for reset timeout: %d\n", hdev->reset_type);
3636 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3638 struct hclge_vf_rst_cmd *req;
3639 struct hclge_desc desc;
3641 req = (struct hclge_vf_rst_cmd *)desc.data;
3642 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3643 req->dest_vfid = func_id;
3648 return hclge_cmd_send(&hdev->hw, &desc, 1);
3651 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3655 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3656 struct hclge_vport *vport = &hdev->vport[i];
3659 /* Send cmd to set/clear VF's FUNC_RST_ING */
3660 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3662 dev_err(&hdev->pdev->dev,
3663 "set vf(%u) rst failed %d!\n",
3664 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3669 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3672 /* Inform VF to process the reset.
3673 * hclge_inform_reset_assert_to_vf may fail if VF
3674 * driver is not loaded.
3676 ret = hclge_inform_reset_assert_to_vf(vport);
3678 dev_warn(&hdev->pdev->dev,
3679 "inform reset to vf(%u) failed %d!\n",
3680 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3687 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3689 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3690 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3691 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3694 hclge_mbx_handler(hdev);
3696 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3699 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3701 struct hclge_pf_rst_sync_cmd *req;
3702 struct hclge_desc desc;
3706 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3707 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3710 /* VF needs to down its netdev via mbx during PF or FLR reset */
3711 hclge_mailbox_service_task(hdev);
3713 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3714 /* for compatibility with old firmware, wait
3715 * 100 ms for the VF to stop IO
3716 */
3717 if (ret == -EOPNOTSUPP) {
3718 msleep(HCLGE_RESET_SYNC_TIME);
3721 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3724 } else if (req->all_vf_ready) {
3727 msleep(HCLGE_PF_RESET_SYNC_TIME);
3728 hclge_cmd_reuse_desc(&desc, true);
3729 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3731 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3734 void hclge_report_hw_error(struct hclge_dev *hdev,
3735 enum hnae3_hw_error_type type)
3737 struct hnae3_client *client = hdev->nic_client;
3739 if (!client || !client->ops->process_hw_error ||
3740 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3743 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3746 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3750 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3751 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3752 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3753 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3754 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3757 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3758 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3759 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3760 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3764 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3766 struct hclge_desc desc;
3767 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3770 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3771 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3772 req->fun_reset_vfid = func_id;
3774 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3776 dev_err(&hdev->pdev->dev,
3777 "send function reset cmd fail, status =%d\n", ret);
3782 static void hclge_do_reset(struct hclge_dev *hdev)
3784 struct hnae3_handle *handle = &hdev->vport[0].nic;
3785 struct pci_dev *pdev = hdev->pdev;
3788 if (hclge_get_hw_reset_stat(handle)) {
3789 dev_info(&pdev->dev, "hardware reset not finished\n");
3790 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3791 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3792 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3796 switch (hdev->reset_type) {
3797 case HNAE3_IMP_RESET:
3798 dev_info(&pdev->dev, "IMP reset requested\n");
3799 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3800 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3801 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3803 case HNAE3_GLOBAL_RESET:
3804 dev_info(&pdev->dev, "global reset requested\n");
3805 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3806 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3807 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3809 case HNAE3_FUNC_RESET:
3810 dev_info(&pdev->dev, "PF reset requested\n");
3811 /* schedule again to check later */
3812 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3813 hclge_reset_task_schedule(hdev);
3816 dev_warn(&pdev->dev,
3817 "unsupported reset type: %d\n", hdev->reset_type);
3822 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3823 unsigned long *addr)
3825 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3826 struct hclge_dev *hdev = ae_dev->priv;
3828 /* return the highest priority reset level amongst all */
3829 if (test_bit(HNAE3_IMP_RESET, addr)) {
3830 rst_level = HNAE3_IMP_RESET;
3831 clear_bit(HNAE3_IMP_RESET, addr);
3832 clear_bit(HNAE3_GLOBAL_RESET, addr);
3833 clear_bit(HNAE3_FUNC_RESET, addr);
3834 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3835 rst_level = HNAE3_GLOBAL_RESET;
3836 clear_bit(HNAE3_GLOBAL_RESET, addr);
3837 clear_bit(HNAE3_FUNC_RESET, addr);
3838 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3839 rst_level = HNAE3_FUNC_RESET;
3840 clear_bit(HNAE3_FUNC_RESET, addr);
3841 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3842 rst_level = HNAE3_FLR_RESET;
3843 clear_bit(HNAE3_FLR_RESET, addr);
3846 if (hdev->reset_type != HNAE3_NONE_RESET &&
3847 rst_level < hdev->reset_type)
3848 return HNAE3_NONE_RESET;
3853 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3857 switch (hdev->reset_type) {
3858 case HNAE3_IMP_RESET:
3859 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3861 case HNAE3_GLOBAL_RESET:
3862 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3871 /* For revision 0x20, the reset interrupt source
3872 * can only be cleared after the hardware reset is done
3873 */
3874 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3875 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3878 hclge_enable_vector(&hdev->misc_vector, true);
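/* The reset handshake is signalled to the firmware through the
 * HCLGE_NIC_SW_RST_RDY bit in the NIC CSQ depth register: it is set once
 * the driver is ready for the reset and cleared again after
 * re-initialization is done.
 */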
3881 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3885 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3887 reg_val |= HCLGE_NIC_SW_RST_RDY;
3889 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3891 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3894 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3898 ret = hclge_set_all_vf_rst(hdev, true);
3902 hclge_func_reset_sync_vf(hdev);
3907 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3912 switch (hdev->reset_type) {
3913 case HNAE3_FUNC_RESET:
3914 ret = hclge_func_reset_notify_vf(hdev);
3918 ret = hclge_func_reset_cmd(hdev, 0);
3920 dev_err(&hdev->pdev->dev,
3921 "asserting function reset fail %d!\n", ret);
3925 /* After performing PF reset, it is not necessary to do the
3926 * mailbox handling or send any command to firmware, because
3927 * any mailbox handling or command to firmware is only valid
3928 * after hclge_cmd_init is called.
3929 */
3930 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3931 hdev->rst_stats.pf_rst_cnt++;
3933 case HNAE3_FLR_RESET:
3934 ret = hclge_func_reset_notify_vf(hdev);
3938 case HNAE3_IMP_RESET:
3939 hclge_handle_imp_error(hdev);
3940 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3941 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3942 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3948 /* inform hardware that preparatory work is done */
3949 msleep(HCLGE_RESET_SYNC_TIME);
3950 hclge_reset_handshake(hdev, true);
3951 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3956 static void hclge_show_rst_info(struct hclge_dev *hdev)
3960 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3964 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3966 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3971 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3973 #define MAX_RESET_FAIL_CNT 5
3975 if (hdev->reset_pending) {
3976 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3977 hdev->reset_pending);
3979 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3980 HCLGE_RESET_INT_M) {
3981 dev_info(&hdev->pdev->dev,
3982 "reset failed because new reset interrupt\n");
3983 hclge_clear_reset_cause(hdev);
3985 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3986 hdev->rst_stats.reset_fail_cnt++;
3987 set_bit(hdev->reset_type, &hdev->reset_pending);
3988 dev_info(&hdev->pdev->dev,
3989 "re-schedule reset task(%u)\n",
3990 hdev->rst_stats.reset_fail_cnt);
3994 hclge_clear_reset_cause(hdev);
3996 /* recover the handshake status when reset fails */
3997 hclge_reset_handshake(hdev, true);
3999 dev_err(&hdev->pdev->dev, "Reset fail!\n");
4001 hclge_show_rst_info(hdev);
4003 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4008 static void hclge_update_reset_level(struct hclge_dev *hdev)
4010 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4011 enum hnae3_reset_type reset_level;
4013 /* reset request will not be set during reset, so clear
4014 * pending reset request to avoid unnecessary reset
4015 * caused by the same reason.
4017 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4019 /* if default_reset_request has a higher level reset request,
4020 * it should be handled as soon as possible, since some errors
4021 * need this kind of reset to be fixed.
4022 */
4023 reset_level = hclge_get_reset_level(ae_dev,
4024 &hdev->default_reset_request);
4025 if (reset_level != HNAE3_NONE_RESET)
4026 set_bit(reset_level, &hdev->reset_request);
4029 static int hclge_set_rst_done(struct hclge_dev *hdev)
4031 struct hclge_pf_rst_done_cmd *req;
4032 struct hclge_desc desc;
4035 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4036 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4037 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4039 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4040 /* To be compatible with the old firmware, which does not support
4041 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4042 * return success
4043 */
4044 if (ret == -EOPNOTSUPP) {
4045 dev_warn(&hdev->pdev->dev,
4046 "current firmware does not support command(0x%x)!\n",
4047 HCLGE_OPC_PF_RST_DONE);
4050 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4057 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4061 switch (hdev->reset_type) {
4062 case HNAE3_FUNC_RESET:
4063 case HNAE3_FLR_RESET:
4064 ret = hclge_set_all_vf_rst(hdev, false);
4066 case HNAE3_GLOBAL_RESET:
4067 case HNAE3_IMP_RESET:
4068 ret = hclge_set_rst_done(hdev);
4074 /* clear the handshake status after re-initialization is done */
4075 hclge_reset_handshake(hdev, false);
4080 static int hclge_reset_stack(struct hclge_dev *hdev)
4084 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4088 ret = hclge_reset_ae_dev(hdev->ae_dev);
4092 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4095 static int hclge_reset_prepare(struct hclge_dev *hdev)
4099 hdev->rst_stats.reset_cnt++;
4100 /* perform reset of the stack & ae device for a client */
4101 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4106 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4111 return hclge_reset_prepare_wait(hdev);
4114 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4118 hdev->rst_stats.hw_reset_done_cnt++;
4120 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4125 ret = hclge_reset_stack(hdev);
4130 hclge_clear_reset_cause(hdev);
4132 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4133 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
4137 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4140 ret = hclge_reset_prepare_up(hdev);
4145 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4150 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4154 hdev->last_reset_time = jiffies;
4155 hdev->rst_stats.reset_fail_cnt = 0;
4156 hdev->rst_stats.reset_done_cnt++;
4157 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4159 hclge_update_reset_level(hdev);
4164 static void hclge_reset(struct hclge_dev *hdev)
4166 if (hclge_reset_prepare(hdev))
4169 if (hclge_reset_wait(hdev))
4172 if (hclge_reset_rebuild(hdev))
4178 if (hclge_reset_err_handle(hdev))
4179 hclge_reset_task_schedule(hdev);
4182 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4184 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4185 struct hclge_dev *hdev = ae_dev->priv;
4187 /* We might end up getting called broadly because of the 2 cases below:
4188 * 1. Recoverable error was conveyed through APEI and the only way to
4189 * bring back normalcy is to reset.
4190 * 2. A new reset request from the stack due to timeout
4192 * check if this is a new reset request and we are not here just because
4193 * last reset attempt did not succeed and watchdog hit us again. We will
4194 * know this if the last reset request did not occur very recently (watchdog
4195 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ)
4196 * In case of new request we reset the "reset level" to PF reset.
4197 * And if it is a repeat reset request of the most recent one then we
4198 * want to make sure we throttle the reset request. Therefore, we will
4199 * not allow it again within 3*HZ jiffies of the last one.
4202 if (time_before(jiffies, (hdev->last_reset_time +
4203 HCLGE_RESET_INTERVAL))) {
4204 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4208 if (hdev->default_reset_request) {
4210 hclge_get_reset_level(ae_dev,
4211 &hdev->default_reset_request);
4212 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4213 hdev->reset_level = HNAE3_FUNC_RESET;
4216 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4219 /* request reset & schedule reset task */
4220 set_bit(hdev->reset_level, &hdev->reset_request);
4221 hclge_reset_task_schedule(hdev);
4223 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4224 hdev->reset_level++;
4227 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4228 enum hnae3_reset_type rst_type)
4230 struct hclge_dev *hdev = ae_dev->priv;
4232 set_bit(rst_type, &hdev->default_reset_request);
4235 static void hclge_reset_timer(struct timer_list *t)
4237 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4239 /* if default_reset_request has no value, it means that this reset
4240 * request has already been handled, so just return here
4241 */
4242 if (!hdev->default_reset_request)
4245 dev_info(&hdev->pdev->dev,
4246 "triggering reset in reset timer\n");
4247 hclge_reset_event(hdev->pdev, NULL);
4250 static void hclge_reset_subtask(struct hclge_dev *hdev)
4252 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4254 /* check if there is any ongoing reset in the hardware. This status can
4255 * be checked from reset_pending. If there is, then we need to wait for
4256 * hardware to complete the reset.
4257 * a. If we are able to figure out in reasonable time that the hardware
4258 * has been fully reset, then we can proceed with driver and client
4259 * reset etc.
4260 * b. else, we can come back later to check this status so re-sched
4261 * now.
4262 */
4263 hdev->last_reset_time = jiffies;
4264 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4265 if (hdev->reset_type != HNAE3_NONE_RESET)
4268 /* check if we got any *new* reset requests to be honored */
4269 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4270 if (hdev->reset_type != HNAE3_NONE_RESET)
4271 hclge_do_reset(hdev);
4273 hdev->reset_type = HNAE3_NONE_RESET;
4276 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4278 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4279 enum hnae3_reset_type reset_type;
4281 if (ae_dev->hw_err_reset_req) {
4282 reset_type = hclge_get_reset_level(ae_dev,
4283 &ae_dev->hw_err_reset_req);
4284 hclge_set_def_reset_request(ae_dev, reset_type);
4287 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4288 ae_dev->ops->reset_event(hdev->pdev, NULL);
4290 /* enable the interrupt after error handling is complete */
4291 hclge_enable_vector(&hdev->misc_vector, true);
4294 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4296 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4298 ae_dev->hw_err_reset_req = 0;
4300 if (hclge_find_error_source(hdev)) {
4301 hclge_handle_error_info_log(ae_dev);
4302 hclge_handle_mac_tnl(hdev);
4305 hclge_handle_err_reset_request(hdev);
4308 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4310 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4311 struct device *dev = &hdev->pdev->dev;
4314 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4315 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4316 if (hclge_handle_hw_msix_error
4317 (hdev, &hdev->default_reset_request))
4318 dev_info(dev, "received msix interrupt 0x%x\n",
4322 hclge_handle_hw_ras_error(ae_dev);
4324 hclge_handle_err_reset_request(hdev);
4327 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4329 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4332 if (hnae3_dev_ras_imp_supported(hdev))
4333 hclge_handle_err_recovery(hdev);
4335 hclge_misc_err_recovery(hdev);
4338 static void hclge_reset_service_task(struct hclge_dev *hdev)
4340 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4343 down(&hdev->reset_sem);
4344 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4346 hclge_reset_subtask(hdev);
4348 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4349 up(&hdev->reset_sem);
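/* A VF is considered dead when its last_active_jiffies has not been
 * refreshed within 8 seconds; the vport MPS is then rolled back to the
 * default frame size.
 */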
4352 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4356 /* start from vport 1, since vport 0 is the PF and is always alive */
4357 for (i = 1; i < hdev->num_alloc_vport; i++) {
4358 struct hclge_vport *vport = &hdev->vport[i];
4360 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4361 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4363 /* If vf is not alive, set to default value */
4364 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4365 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
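/* Periodic housekeeping: refresh the link state and sync the MAC,
 * promisc and flow director tables every pass, but rate-limit the
 * heavier updates below so they run at most once per second.
 */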
4369 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4371 unsigned long delta = round_jiffies_relative(HZ);
4373 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4376 /* Always handle the link update to make sure the link state is
4377 * up to date when the update is triggered by the mailbox.
4379 hclge_update_link_status(hdev);
4380 hclge_sync_mac_table(hdev);
4381 hclge_sync_promisc_mode(hdev);
4382 hclge_sync_fd_table(hdev);
4384 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4385 delta = jiffies - hdev->last_serv_processed;
4387 if (delta < round_jiffies_relative(HZ)) {
4388 delta = round_jiffies_relative(HZ) - delta;
4393 hdev->serv_processed_cnt++;
4394 hclge_update_vport_alive(hdev);
4396 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4397 hdev->last_serv_processed = jiffies;
4401 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4402 hclge_update_stats_for_all(hdev);
4404 hclge_update_port_info(hdev);
4405 hclge_sync_vlan_filter(hdev);
4407 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4408 hclge_rfs_filter_expire(hdev);
4410 hdev->last_serv_processed = jiffies;
4413 hclge_task_schedule(hdev, delta);
4416 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4418 unsigned long flags;
4420 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4421 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4422 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4425 /* to prevent racing with the irq handler */
4426 spin_lock_irqsave(&hdev->ptp->lock, flags);
4428 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4429 * handler may handle it just before spin_lock_irqsave().
4431 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4432 hclge_ptp_clean_tx_hwts(hdev);
4434 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
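/* Single delayed-work entry point: the error handling, reset, PTP,
 * mailbox and periodic sub-tasks are all funneled through this one
 * work item, so they never run concurrently with each other.
 */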
4437 static void hclge_service_task(struct work_struct *work)
4439 struct hclge_dev *hdev =
4440 container_of(work, struct hclge_dev, service_task.work);
4442 hclge_errhand_service_task(hdev);
4443 hclge_reset_service_task(hdev);
4444 hclge_ptp_service_task(hdev);
4445 hclge_mailbox_service_task(hdev);
4446 hclge_periodic_service_task(hdev);
4448 /* Handle error recovery, reset and mbx again in case the periodic task
4449 * delays the handling by calling hclge_task_schedule() in
4450 * hclge_periodic_service_task().
4452 hclge_errhand_service_task(hdev);
4453 hclge_reset_service_task(hdev);
4454 hclge_mailbox_service_task(hdev);
4457 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4459 /* VF handle has no client */
4460 if (!handle->client)
4461 return container_of(handle, struct hclge_vport, nic);
4462 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4463 return container_of(handle, struct hclge_vport, roce);
4465 return container_of(handle, struct hclge_vport, nic);
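/* Vectors with (idx - 1) below HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 live in a
 * flat register array at HCLGE_VECTOR_REG_BASE; higher indexes spill
 * into the extended region, addressed by
 * (idx - 1) / 64 * HCLGE_VECTOR_REG_OFFSET_H +
 * (idx - 1) % 64 * HCLGE_VECTOR_REG_OFFSET,
 * e.g. idx = 66 selects block 1, entry 1 of the extended region.
 */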
4468 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4469 struct hnae3_vector_info *vector_info)
4471 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4473 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4475 /* an extended offset is needed to configure vectors >= 64 */
4476 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4477 vector_info->io_addr = hdev->hw.io_base +
4478 HCLGE_VECTOR_REG_BASE +
4479 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4481 vector_info->io_addr = hdev->hw.io_base +
4482 HCLGE_VECTOR_EXT_REG_BASE +
4483 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4484 HCLGE_VECTOR_REG_OFFSET_H +
4485 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4486 HCLGE_VECTOR_REG_OFFSET;
4488 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4489 hdev->vector_irq[idx] = vector_info->vector;
4492 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4493 struct hnae3_vector_info *vector_info)
4495 struct hclge_vport *vport = hclge_get_vport(handle);
4496 struct hnae3_vector_info *vector = vector_info;
4497 struct hclge_dev *hdev = vport->back;
4502 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4503 vector_num = min(hdev->num_msi_left, vector_num);
4505 for (j = 0; j < vector_num; j++) {
4506 while (++i < hdev->num_nic_msi) {
4507 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4508 hclge_get_vector_info(hdev, i, vector);
4516 hdev->num_msi_left -= alloc;
4517 hdev->num_msi_used += alloc;
4522 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4526 for (i = 0; i < hdev->num_msi; i++)
4527 if (vector == hdev->vector_irq[i])
4533 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4535 struct hclge_vport *vport = hclge_get_vport(handle);
4536 struct hclge_dev *hdev = vport->back;
4539 vector_id = hclge_get_vector_index(hdev, vector);
4540 if (vector_id < 0) {
4541 dev_err(&hdev->pdev->dev,
4542 "Get vector index fail. vector = %d\n", vector);
4546 hclge_free_vector(hdev, vector_id);
4551 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4553 return HCLGE_RSS_KEY_SIZE;
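/* The RSS key is longer than a single descriptor can carry, so it is
 * written in HCLGE_RSS_HASH_KEY_NUM-byte chunks, with the chunk index
 * encoded alongside the hash algorithm in the hash_config field.
 */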
4556 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4557 const u8 hfunc, const u8 *key)
4559 struct hclge_rss_config_cmd *req;
4560 unsigned int key_offset = 0;
4561 struct hclge_desc desc;
4566 key_counts = HCLGE_RSS_KEY_SIZE;
4567 req = (struct hclge_rss_config_cmd *)desc.data;
4569 while (key_counts) {
4570 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4573 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4574 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4576 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4577 memcpy(req->hash_key,
4578 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4580 key_counts -= key_size;
4582 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4584 dev_err(&hdev->pdev->dev,
4585 "Configure RSS config fail, status = %d\n",
4593 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4595 struct hclge_rss_indirection_table_cmd *req;
4596 struct hclge_desc desc;
4597 int rss_cfg_tbl_num;
4605 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4606 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4607 HCLGE_RSS_CFG_TBL_SIZE;
4609 for (i = 0; i < rss_cfg_tbl_num; i++) {
4610 hclge_cmd_setup_basic_desc
4611 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4613 req->start_table_index =
4614 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4615 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4616 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4617 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4618 req->rss_qid_l[j] = qid & 0xff;
4620 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4621 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4622 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4623 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4625 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4627 dev_err(&hdev->pdev->dev,
4628 "Configure rss indir table fail,status = %d\n",
4636 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4637 u16 *tc_size, u16 *tc_offset)
4639 struct hclge_rss_tc_mode_cmd *req;
4640 struct hclge_desc desc;
4644 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4645 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4647 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4650 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4651 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4652 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4653 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4654 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4655 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4656 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4658 req->rss_tc_mode[i] = cpu_to_le16(mode);
4661 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4663 dev_err(&hdev->pdev->dev,
4664 "Configure rss tc mode fail, status = %d\n", ret);
4669 static void hclge_get_rss_type(struct hclge_vport *vport)
4671 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4672 vport->rss_tuple_sets.ipv4_udp_en ||
4673 vport->rss_tuple_sets.ipv4_sctp_en ||
4674 vport->rss_tuple_sets.ipv6_tcp_en ||
4675 vport->rss_tuple_sets.ipv6_udp_en ||
4676 vport->rss_tuple_sets.ipv6_sctp_en)
4677 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4678 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4679 vport->rss_tuple_sets.ipv6_fragment_en)
4680 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4682 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4685 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4687 struct hclge_rss_input_tuple_cmd *req;
4688 struct hclge_desc desc;
4691 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4693 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4695 /* Get the tuple config from the PF (vport 0) */
4696 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4697 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4698 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4699 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4700 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4701 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4702 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4703 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4704 hclge_get_rss_type(&hdev->vport[0]);
4705 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4707 dev_err(&hdev->pdev->dev,
4708 "Configure rss input fail, status = %d\n", ret);
4712 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4715 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4716 struct hclge_vport *vport = hclge_get_vport(handle);
4719 /* Get hash algorithm */
4721 switch (vport->rss_algo) {
4722 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4723 *hfunc = ETH_RSS_HASH_TOP;
4725 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4726 *hfunc = ETH_RSS_HASH_XOR;
4729 *hfunc = ETH_RSS_HASH_UNKNOWN;
4734 /* Get the RSS Key required by the user */
4736 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4738 /* Get the indirection table */
4740 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4741 indir[i] = vport->rss_indirection_tbl[i];
4746 static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
4750 case ETH_RSS_HASH_TOP:
4751 *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4753 case ETH_RSS_HASH_XOR:
4754 *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4756 case ETH_RSS_HASH_NO_CHANGE:
4757 *hash_algo = vport->rss_algo;
4764 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4765 const u8 *key, const u8 hfunc)
4767 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4768 struct hclge_vport *vport = hclge_get_vport(handle);
4769 struct hclge_dev *hdev = vport->back;
4773 ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
4775 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4779 /* Set the RSS hash key if specified by the user */
4781 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4785 /* Update the shadow RSS key with the user-specified key */
4786 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4788 ret = hclge_set_rss_algo_key(hdev, hash_algo,
4789 vport->rss_hash_key);
4793 vport->rss_algo = hash_algo;
4795 /* Update the shadow RSS table with user specified qids */
4796 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4797 vport->rss_indirection_tbl[i] = indir[i];
4799 /* Update the hardware */
4800 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
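/* Translate the ethtool RXH_* hash fields into the device tuple bits;
 * SCTP flows additionally hash on the verification tag.
 */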
4803 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4805 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4807 if (nfc->data & RXH_L4_B_2_3)
4808 hash_sets |= HCLGE_D_PORT_BIT;
4810 hash_sets &= ~HCLGE_D_PORT_BIT;
4812 if (nfc->data & RXH_IP_SRC)
4813 hash_sets |= HCLGE_S_IP_BIT;
4815 hash_sets &= ~HCLGE_S_IP_BIT;
4817 if (nfc->data & RXH_IP_DST)
4818 hash_sets |= HCLGE_D_IP_BIT;
4820 hash_sets &= ~HCLGE_D_IP_BIT;
4822 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4823 hash_sets |= HCLGE_V_TAG_BIT;
4828 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4829 struct ethtool_rxnfc *nfc,
4830 struct hclge_rss_input_tuple_cmd *req)
4832 struct hclge_dev *hdev = vport->back;
4835 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4836 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4837 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4838 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4839 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4840 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4841 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4842 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4844 tuple_sets = hclge_get_rss_hash_bits(nfc);
4845 switch (nfc->flow_type) {
4847 req->ipv4_tcp_en = tuple_sets;
4850 req->ipv6_tcp_en = tuple_sets;
4853 req->ipv4_udp_en = tuple_sets;
4856 req->ipv6_udp_en = tuple_sets;
4859 req->ipv4_sctp_en = tuple_sets;
4862 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4863 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4866 req->ipv6_sctp_en = tuple_sets;
4869 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4872 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4881 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4882 struct ethtool_rxnfc *nfc)
4884 struct hclge_vport *vport = hclge_get_vport(handle);
4885 struct hclge_dev *hdev = vport->back;
4886 struct hclge_rss_input_tuple_cmd *req;
4887 struct hclge_desc desc;
4890 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4891 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4894 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4897 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4899 dev_err(&hdev->pdev->dev,
4900 "failed to init rss tuple cmd, ret = %d\n", ret);
4904 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4906 dev_err(&hdev->pdev->dev,
4907 "Set rss tuple fail, status = %d\n", ret);
4911 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4912 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4913 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4914 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4915 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4916 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4917 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4918 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4919 hclge_get_rss_type(vport);
4923 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4926 switch (flow_type) {
4928 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4931 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4934 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4937 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4940 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4943 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4947 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4956 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4960 if (tuple_sets & HCLGE_D_PORT_BIT)
4961 tuple_data |= RXH_L4_B_2_3;
4962 if (tuple_sets & HCLGE_S_PORT_BIT)
4963 tuple_data |= RXH_L4_B_0_1;
4964 if (tuple_sets & HCLGE_D_IP_BIT)
4965 tuple_data |= RXH_IP_DST;
4966 if (tuple_sets & HCLGE_S_IP_BIT)
4967 tuple_data |= RXH_IP_SRC;
4972 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4973 struct ethtool_rxnfc *nfc)
4975 struct hclge_vport *vport = hclge_get_vport(handle);
4981 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4982 if (ret || !tuple_sets)
4985 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4990 static int hclge_get_tc_size(struct hnae3_handle *handle)
4992 struct hclge_vport *vport = hclge_get_vport(handle);
4993 struct hclge_dev *hdev = vport->back;
4995 return hdev->pf_rss_size_max;
4998 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
5000 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
5001 struct hclge_vport *vport = hdev->vport;
5002 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
5003 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
5004 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
5005 struct hnae3_tc_info *tc_info;
5010 tc_info = &vport->nic.kinfo.tc_info;
5011 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
5012 rss_size = tc_info->tqp_count[i];
5015 if (!(hdev->hw_tc_map & BIT(i)))
5018 /* tc_size set to hardware is the log2 of the roundup power of two
5019 * of rss_size; the actual queue size is limited by the indirection
5022 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5024 dev_err(&hdev->pdev->dev,
5025 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5030 roundup_size = roundup_pow_of_two(rss_size);
5031 roundup_size = ilog2(roundup_size);
5034 tc_size[i] = roundup_size;
5035 tc_offset[i] = tc_info->tqp_offset[i];
5038 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
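/* Flush the vport 0 shadow RSS state (indirection table, hash key and
 * algorithm, input tuples and TC mode) into the hardware in one pass.
 */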
5041 int hclge_rss_init_hw(struct hclge_dev *hdev)
5043 struct hclge_vport *vport = hdev->vport;
5044 u16 *rss_indir = vport[0].rss_indirection_tbl;
5045 u8 *key = vport[0].rss_hash_key;
5046 u8 hfunc = vport[0].rss_algo;
5049 ret = hclge_set_rss_indir_table(hdev, rss_indir);
5053 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5057 ret = hclge_set_rss_input_tuple(hdev);
5061 return hclge_init_rss_tc_mode(hdev);
5064 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5066 struct hclge_vport *vport = &hdev->vport[0];
5069 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5070 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5073 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5075 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5076 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5077 struct hclge_vport *vport = &hdev->vport[0];
5080 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5081 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5083 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5084 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5085 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5086 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5087 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5088 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5089 vport->rss_tuple_sets.ipv6_sctp_en =
5090 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5091 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5092 HCLGE_RSS_INPUT_TUPLE_SCTP;
5093 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5095 vport->rss_algo = rss_algo;
5097 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5098 sizeof(*rss_ind_tbl), GFP_KERNEL);
5102 vport->rss_indirection_tbl = rss_ind_tbl;
5103 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5105 hclge_rss_indir_init_cfg(hdev);
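/* Walk the ring chain and (un)map each TQP to the given vector. Entries
 * are batched HCLGE_VECTOR_ELEMENTS_PER_CMD at a time: a full descriptor
 * is sent and re-initialized before the walk continues.
 */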
5110 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5111 int vector_id, bool en,
5112 struct hnae3_ring_chain_node *ring_chain)
5114 struct hclge_dev *hdev = vport->back;
5115 struct hnae3_ring_chain_node *node;
5116 struct hclge_desc desc;
5117 struct hclge_ctrl_vector_chain_cmd *req =
5118 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5119 enum hclge_cmd_status status;
5120 enum hclge_opcode_type op;
5121 u16 tqp_type_and_id;
5124 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5125 hclge_cmd_setup_basic_desc(&desc, op, false);
5126 req->int_vector_id_l = hnae3_get_field(vector_id,
5127 HCLGE_VECTOR_ID_L_M,
5128 HCLGE_VECTOR_ID_L_S);
5129 req->int_vector_id_h = hnae3_get_field(vector_id,
5130 HCLGE_VECTOR_ID_H_M,
5131 HCLGE_VECTOR_ID_H_S);
5134 for (node = ring_chain; node; node = node->next) {
5135 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5136 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5138 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5139 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5140 HCLGE_TQP_ID_S, node->tqp_index);
5141 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5143 hnae3_get_field(node->int_gl_idx,
5144 HNAE3_RING_GL_IDX_M,
5145 HNAE3_RING_GL_IDX_S));
5146 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5147 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5148 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5149 req->vfid = vport->vport_id;
5151 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5153 dev_err(&hdev->pdev->dev,
5154 "Map TQP fail, status is %d.\n",
5160 hclge_cmd_setup_basic_desc(&desc,
5163 req->int_vector_id_l =
5164 hnae3_get_field(vector_id,
5165 HCLGE_VECTOR_ID_L_M,
5166 HCLGE_VECTOR_ID_L_S);
5167 req->int_vector_id_h =
5168 hnae3_get_field(vector_id,
5169 HCLGE_VECTOR_ID_H_M,
5170 HCLGE_VECTOR_ID_H_S);
5175 req->int_cause_num = i;
5176 req->vfid = vport->vport_id;
5177 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5179 dev_err(&hdev->pdev->dev,
5180 "Map TQP fail, status is %d.\n", status);
5188 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5189 struct hnae3_ring_chain_node *ring_chain)
5191 struct hclge_vport *vport = hclge_get_vport(handle);
5192 struct hclge_dev *hdev = vport->back;
5195 vector_id = hclge_get_vector_index(hdev, vector);
5196 if (vector_id < 0) {
5197 dev_err(&hdev->pdev->dev,
5198 "failed to get vector index. vector=%d\n", vector);
5202 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5205 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5206 struct hnae3_ring_chain_node *ring_chain)
5208 struct hclge_vport *vport = hclge_get_vport(handle);
5209 struct hclge_dev *hdev = vport->back;
5212 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5215 vector_id = hclge_get_vector_index(hdev, vector);
5216 if (vector_id < 0) {
5217 dev_err(&handle->pdev->dev,
5218 "Get vector index fail. ret =%d\n", vector_id);
5222 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5224 dev_err(&handle->pdev->dev,
5225 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5231 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5232 bool en_uc, bool en_mc, bool en_bc)
5234 struct hclge_vport *vport = &hdev->vport[vf_id];
5235 struct hnae3_handle *handle = &vport->nic;
5236 struct hclge_promisc_cfg_cmd *req;
5237 struct hclge_desc desc;
5238 bool uc_tx_en = en_uc;
5242 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5244 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5247 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5250 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5251 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5252 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5253 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5254 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5255 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5256 req->extend_promisc = promisc_cfg;
5258 /* to be compatible with DEVICE_VERSION_V1/2 */
5260 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5261 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5262 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5263 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5264 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5265 req->promisc = promisc_cfg;
5267 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5269 dev_err(&hdev->pdev->dev,
5270 "failed to set vport %u promisc mode, ret = %d.\n",
5276 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5277 bool en_mc_pmc, bool en_bc_pmc)
5279 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5280 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5283 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5286 struct hclge_vport *vport = hclge_get_vport(handle);
5287 struct hclge_dev *hdev = vport->back;
5288 bool en_bc_pmc = true;
5290 /* For devices whose version is below V2, if broadcast promisc is
5291 * enabled, the vlan filter is always bypassed. So broadcast promisc
5292 * should be disabled until the user enables promisc mode
5294 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5295 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5297 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5301 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5303 struct hclge_vport *vport = hclge_get_vport(handle);
5305 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5308 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5310 if (hlist_empty(&hdev->fd_rule_list))
5311 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5314 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5316 if (!test_bit(location, hdev->fd_bmap)) {
5317 set_bit(location, hdev->fd_bmap);
5318 hdev->hclge_fd_rule_num++;
5322 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5324 if (test_bit(location, hdev->fd_bmap)) {
5325 clear_bit(location, hdev->fd_bmap);
5326 hdev->hclge_fd_rule_num--;
5330 static void hclge_fd_free_node(struct hclge_dev *hdev,
5331 struct hclge_fd_rule *rule)
5333 hlist_del(&rule->rule_node);
5335 hclge_sync_fd_state(hdev);
5338 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5339 struct hclge_fd_rule *old_rule,
5340 struct hclge_fd_rule *new_rule,
5341 enum HCLGE_FD_NODE_STATE state)
5344 case HCLGE_FD_TO_ADD:
5345 case HCLGE_FD_ACTIVE:
5346 /* 1) if the new state is TO_ADD, just replace the old rule
5347 * with the same location, no matter its state, because the
5348 * new rule will be configured to the hardware.
5349 * 2) if the new state is ACTIVE, it means the new rule
5350 * has been configured to the hardware, so just replace
5351 * the old rule node with the same location.
5352 * 3) since neither case adds a new node to the list, it's
5353 * unnecessary to update the rule number and fd_bmap.
5355 new_rule->rule_node.next = old_rule->rule_node.next;
5356 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5357 memcpy(old_rule, new_rule, sizeof(*old_rule));
5360 case HCLGE_FD_DELETED:
5361 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5362 hclge_fd_free_node(hdev, old_rule);
5364 case HCLGE_FD_TO_DEL:
5365 /* if the new request is TO_DEL, and the old rule exists:
5366 * 1) if the state of the old rule is TO_DEL, we need do nothing,
5367 * because we delete rules by location; the other rule content
5369 * 2) if the state of the old rule is ACTIVE, we need to change its
5370 * state to TO_DEL, so the rule will be deleted when the periodic
5371 * task is scheduled.
5372 * 3) if the state of the old rule is TO_ADD, it means the rule hasn't
5373 * been added to the hardware, so we just delete the rule node from
5374 * fd_rule_list directly.
5376 if (old_rule->state == HCLGE_FD_TO_ADD) {
5377 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5378 hclge_fd_free_node(hdev, old_rule);
5381 old_rule->state = HCLGE_FD_TO_DEL;
5386 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5388 struct hclge_fd_rule **parent)
5390 struct hclge_fd_rule *rule;
5391 struct hlist_node *node;
5393 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5394 if (rule->location == location)
5396 else if (rule->location > location)
5398 /* record the parent node, used to keep the nodes in fd_rule_list
5407 /* insert the fd rule node in ascending order according to rule->location */
5408 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5409 struct hclge_fd_rule *rule,
5410 struct hclge_fd_rule *parent)
5412 INIT_HLIST_NODE(&rule->rule_node);
5415 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5417 hlist_add_head(&rule->rule_node, hlist);
5420 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5421 struct hclge_fd_user_def_cfg *cfg)
5423 struct hclge_fd_user_def_cfg_cmd *req;
5424 struct hclge_desc desc;
5428 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5430 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5432 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5433 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5434 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5435 req->ol2_cfg = cpu_to_le16(data);
5438 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5439 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5440 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5441 req->ol3_cfg = cpu_to_le16(data);
5444 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5445 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5446 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5447 req->ol4_cfg = cpu_to_le16(data);
5449 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5451 dev_err(&hdev->pdev->dev,
5452 "failed to set fd user def data, ret= %d\n", ret);
5456 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5460 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5464 spin_lock_bh(&hdev->fd_rule_lock);
5466 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5468 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5471 spin_unlock_bh(&hdev->fd_rule_lock);
5474 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5475 struct hclge_fd_rule *rule)
5477 struct hlist_head *hlist = &hdev->fd_rule_list;
5478 struct hclge_fd_rule *fd_rule, *parent = NULL;
5479 struct hclge_fd_user_def_info *info, *old_info;
5480 struct hclge_fd_user_def_cfg *cfg;
5482 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5483 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5486 /* valid layers start from 1, so subtract 1 to get the cfg index */
5487 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5488 info = &rule->ep.user_def;
5490 if (!cfg->ref_cnt || cfg->offset == info->offset)
5493 if (cfg->ref_cnt > 1)
5496 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5498 old_info = &fd_rule->ep.user_def;
5499 if (info->layer == old_info->layer)
5504 dev_err(&hdev->pdev->dev,
5505 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5510 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5511 struct hclge_fd_rule *rule)
5513 struct hclge_fd_user_def_cfg *cfg;
5515 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5516 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5519 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5520 if (!cfg->ref_cnt) {
5521 cfg->offset = rule->ep.user_def.offset;
5522 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5527 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5528 struct hclge_fd_rule *rule)
5530 struct hclge_fd_user_def_cfg *cfg;
5532 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5533 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5536 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5541 if (!cfg->ref_cnt) {
5543 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5547 static void hclge_update_fd_list(struct hclge_dev *hdev,
5548 enum HCLGE_FD_NODE_STATE state, u16 location,
5549 struct hclge_fd_rule *new_rule)
5551 struct hlist_head *hlist = &hdev->fd_rule_list;
5552 struct hclge_fd_rule *fd_rule, *parent = NULL;
5554 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5556 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5557 if (state == HCLGE_FD_ACTIVE)
5558 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5559 hclge_sync_fd_user_def_cfg(hdev, true);
5561 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5565 /* it's unlikely to fail here, because we have checked the rule
5568 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5569 dev_warn(&hdev->pdev->dev,
5570 "failed to delete fd rule %u, it's inexistent\n",
5575 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5576 hclge_sync_fd_user_def_cfg(hdev, true);
5578 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5579 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5581 if (state == HCLGE_FD_TO_ADD) {
5582 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5583 hclge_task_schedule(hdev, 0);
5587 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5589 struct hclge_get_fd_mode_cmd *req;
5590 struct hclge_desc desc;
5593 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5595 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5597 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5599 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5603 *fd_mode = req->mode;
5608 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5609 u32 *stage1_entry_num,
5610 u32 *stage2_entry_num,
5611 u16 *stage1_counter_num,
5612 u16 *stage2_counter_num)
5614 struct hclge_get_fd_allocation_cmd *req;
5615 struct hclge_desc desc;
5618 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5620 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5622 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5624 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5629 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5630 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5631 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5632 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5637 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5638 enum HCLGE_FD_STAGE stage_num)
5640 struct hclge_set_fd_key_config_cmd *req;
5641 struct hclge_fd_key_cfg *stage;
5642 struct hclge_desc desc;
5645 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5647 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5648 stage = &hdev->fd_cfg.key_cfg[stage_num];
5649 req->stage = stage_num;
5650 req->key_select = stage->key_sel;
5651 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5652 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5653 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5654 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5655 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5656 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5658 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5660 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5665 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5667 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5669 spin_lock_bh(&hdev->fd_rule_lock);
5670 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5671 spin_unlock_bh(&hdev->fd_rule_lock);
5673 hclge_fd_set_user_def_cmd(hdev, cfg);
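/* Query the flow director mode and entry allocation from firmware, then
 * set up the stage-1 key config: the 400-bit key mode leaves room for
 * the MAC tuples (and, on V3+ devices, the user-def tuples) on top of
 * the base tuple set.
 */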
5676 static int hclge_init_fd_config(struct hclge_dev *hdev)
5678 #define LOW_2_WORDS 0x03
5679 struct hclge_fd_key_cfg *key_cfg;
5682 if (!hnae3_dev_fd_supported(hdev))
5685 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5689 switch (hdev->fd_cfg.fd_mode) {
5690 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5691 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5693 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5694 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5697 dev_err(&hdev->pdev->dev,
5698 "Unsupported flow director mode %u\n",
5699 hdev->fd_cfg.fd_mode);
5703 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5704 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5705 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5706 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5707 key_cfg->outer_sipv6_word_en = 0;
5708 key_cfg->outer_dipv6_word_en = 0;
5710 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5711 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5712 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5713 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5715 /* If using the max 400-bit key, we can support tuples for ether type */
5716 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5717 key_cfg->tuple_active |=
5718 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5719 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5720 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5723 /* roce_type is used to filter RoCE frames
5724 * dst_vport is used to specify the rule
5726 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5728 ret = hclge_get_fd_allocation(hdev,
5729 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5730 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5731 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5732 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5736 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
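/* A TCAM entry spans three chained descriptors (HCLGE_CMD_FLAG_NEXT);
 * the key bytes are split across the tcam_data fields of the three
 * requests, and entry_vld is only set when adding the x-key.
 */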
5739 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5740 int loc, u8 *key, bool is_add)
5742 struct hclge_fd_tcam_config_1_cmd *req1;
5743 struct hclge_fd_tcam_config_2_cmd *req2;
5744 struct hclge_fd_tcam_config_3_cmd *req3;
5745 struct hclge_desc desc[3];
5748 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5749 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5750 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5751 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5752 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5754 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5755 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5756 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5758 req1->stage = stage;
5759 req1->xy_sel = sel_x ? 1 : 0;
5760 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5761 req1->index = cpu_to_le32(loc);
5762 req1->entry_vld = sel_x ? is_add : 0;
5765 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5766 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5767 sizeof(req2->tcam_data));
5768 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5769 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5772 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5774 dev_err(&hdev->pdev->dev,
5775 "config tcam key fail, ret=%d\n",
5781 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5782 struct hclge_fd_ad_data *action)
5784 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5785 struct hclge_fd_ad_config_cmd *req;
5786 struct hclge_desc desc;
5790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5792 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5793 req->index = cpu_to_le32(loc);
5796 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5797 action->write_rule_id_to_bd);
5798 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5800 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5801 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5802 action->override_tc);
5803 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5804 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5807 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5808 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5809 action->forward_to_direct_queue);
5810 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5812 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5813 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5814 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5815 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5816 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5817 action->counter_id);
5819 req->ad_data = cpu_to_le64(ad_data);
5820 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5822 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
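/* Convert one tuple (value plus mask) into the TCAM x/y encoding with
 * the calc_x()/calc_y() helpers, honoring the per-tuple width and byte
 * layout described by tuple_key_info[].
 */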
5827 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5828 struct hclge_fd_rule *rule)
5830 int offset, moffset, ip_offset;
5831 enum HCLGE_FD_KEY_OPT key_opt;
5832 u16 tmp_x_s, tmp_y_s;
5833 u32 tmp_x_l, tmp_y_l;
5837 if (rule->unused_tuple & BIT(tuple_bit))
5840 key_opt = tuple_key_info[tuple_bit].key_opt;
5841 offset = tuple_key_info[tuple_bit].offset;
5842 moffset = tuple_key_info[tuple_bit].moffset;
5846 calc_x(*key_x, p[offset], p[moffset]);
5847 calc_y(*key_y, p[offset], p[moffset]);
5851 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5852 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5853 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5854 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5858 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5859 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5860 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5861 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5865 for (i = 0; i < ETH_ALEN; i++) {
5866 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5868 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5874 ip_offset = IPV4_INDEX * sizeof(u32);
5875 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5876 *(u32 *)(&p[moffset + ip_offset]));
5877 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5878 *(u32 *)(&p[moffset + ip_offset]));
5879 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5880 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5888 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5889 u8 vf_id, u8 network_port_id)
5891 u32 port_number = 0;
5893 if (port_type == HOST_PORT) {
5894 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5896 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5898 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5900 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5901 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5902 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5908 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5909 __le32 *key_x, __le32 *key_y,
5910 struct hclge_fd_rule *rule)
5912 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5913 u8 cur_pos = 0, tuple_size, shift_bits;
5916 for (i = 0; i < MAX_META_DATA; i++) {
5917 tuple_size = meta_data_key_info[i].key_length;
5918 tuple_bit = key_cfg->meta_data_active & BIT(i);
5920 switch (tuple_bit) {
5921 case BIT(ROCE_TYPE):
5922 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5923 cur_pos += tuple_size;
5925 case BIT(DST_VPORT):
5926 port_number = hclge_get_port_number(HOST_PORT, 0,
5928 hnae3_set_field(meta_data,
5929 GENMASK(cur_pos + tuple_size, cur_pos),
5930 cur_pos, port_number);
5931 cur_pos += tuple_size;
5938 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5939 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5940 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5942 *key_x = cpu_to_le32(tmp_x << shift_bits);
5943 *key_y = cpu_to_le32(tmp_y << shift_bits);
5946 /* A complete key is the combination of a meta data key and a tuple key.
5947 * The meta data key is stored in the MSB region, the tuple key in the
5948 * LSB region, and unused bits are filled with 0.
5950 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5951 struct hclge_fd_rule *rule)
5953 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5954 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5955 u8 *cur_key_x, *cur_key_y;
5956 u8 meta_data_region;
5961 memset(key_x, 0, sizeof(key_x));
5962 memset(key_y, 0, sizeof(key_y));
5966 for (i = 0; i < MAX_TUPLE; i++) {
5969 tuple_size = tuple_key_info[i].key_length / 8;
5970 if (!(key_cfg->tuple_active & BIT(i)))
5973 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5976 cur_key_x += tuple_size;
5977 cur_key_y += tuple_size;
5981 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5982 MAX_META_DATA_LENGTH / 8;
5984 hclge_fd_convert_meta_data(key_cfg,
5985 (__le32 *)(key_x + meta_data_region),
5986 (__le32 *)(key_y + meta_data_region),
5989 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5992 dev_err(&hdev->pdev->dev,
5993 "fd key_y config fail, loc=%u, ret=%d\n",
5994 rule->location, ret);
5998 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
6001 dev_err(&hdev->pdev->dev,
6002 "fd key_x config fail, loc=%u, ret=%d\n",
6003 rule->location, ret);
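/* Build the action data for a rule: drop, direct-queue forwarding or TC
 * override, plus an optional per-VF counter, and program it via
 * HCLGE_OPC_FD_AD_OP at the rule's location.
 */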
6007 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
6008 struct hclge_fd_rule *rule)
6010 struct hclge_vport *vport = hdev->vport;
6011 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
6012 struct hclge_fd_ad_data ad_data;
6014 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
6015 ad_data.ad_id = rule->location;
6017 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6018 ad_data.drop_packet = true;
6019 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6020 ad_data.override_tc = true;
6022 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6024 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6026 ad_data.forward_to_direct_queue = true;
6027 ad_data.queue_id = rule->queue_id;
6030 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6031 ad_data.use_counter = true;
6032 ad_data.counter_id = rule->vf_id %
6033 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6035 ad_data.use_counter = false;
6036 ad_data.counter_id = 0;
6039 ad_data.use_next_stage = false;
6040 ad_data.next_input_key = 0;
6042 ad_data.write_rule_id_to_bd = true;
6043 ad_data.rule_id = rule->location;
6045 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
6048 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6051 if (!spec || !unused_tuple)
6054 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6057 *unused_tuple |= BIT(INNER_SRC_IP);
6060 *unused_tuple |= BIT(INNER_DST_IP);
6063 *unused_tuple |= BIT(INNER_SRC_PORT);
6066 *unused_tuple |= BIT(INNER_DST_PORT);
6069 *unused_tuple |= BIT(INNER_IP_TOS);
6074 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6077 if (!spec || !unused_tuple)
6080 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6081 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6084 *unused_tuple |= BIT(INNER_SRC_IP);
6087 *unused_tuple |= BIT(INNER_DST_IP);
6090 *unused_tuple |= BIT(INNER_IP_TOS);
6093 *unused_tuple |= BIT(INNER_IP_PROTO);
6095 if (spec->l4_4_bytes)
6098 if (spec->ip_ver != ETH_RX_NFC_IP4)
6104 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6107 if (!spec || !unused_tuple)
6110 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6112 /* check whether the src/dst ip addresses are used */
6113 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6114 *unused_tuple |= BIT(INNER_SRC_IP);
6116 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6117 *unused_tuple |= BIT(INNER_DST_IP);
6120 *unused_tuple |= BIT(INNER_SRC_PORT);
6123 *unused_tuple |= BIT(INNER_DST_PORT);
6126 *unused_tuple |= BIT(INNER_IP_TOS);
6131 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6134 if (!spec || !unused_tuple)
6137 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6138 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6140 /* check whether the src/dst ip addresses are used */
6141 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6142 *unused_tuple |= BIT(INNER_SRC_IP);
6144 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6145 *unused_tuple |= BIT(INNER_DST_IP);
6147 if (!spec->l4_proto)
6148 *unused_tuple |= BIT(INNER_IP_PROTO);
6151 *unused_tuple |= BIT(INNER_IP_TOS);
6153 if (spec->l4_4_bytes)
6159 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6161 if (!spec || !unused_tuple)
6164 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6165 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6166 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6168 if (is_zero_ether_addr(spec->h_source))
6169 *unused_tuple |= BIT(INNER_SRC_MAC);
6171 if (is_zero_ether_addr(spec->h_dest))
6172 *unused_tuple |= BIT(INNER_DST_MAC);
6175 *unused_tuple |= BIT(INNER_ETH_TYPE);
6180 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6181 struct ethtool_rx_flow_spec *fs,
6184 if (fs->flow_type & FLOW_EXT) {
6185 if (fs->h_ext.vlan_etype) {
6186 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6190 if (!fs->h_ext.vlan_tci)
6191 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6193 if (fs->m_ext.vlan_tci &&
6194 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6195 dev_err(&hdev->pdev->dev,
6196 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6197 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6201 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6204 if (fs->flow_type & FLOW_MAC_EXT) {
6205 if (hdev->fd_cfg.fd_mode !=
6206 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6207 dev_err(&hdev->pdev->dev,
6208 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6212 if (is_zero_ether_addr(fs->h_ext.h_dest))
6213 *unused_tuple |= BIT(INNER_DST_MAC);
6215 *unused_tuple &= ~BIT(INNER_DST_MAC);
6221 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6222 struct hclge_fd_user_def_info *info)
6224 switch (flow_type) {
6226 info->layer = HCLGE_FD_USER_DEF_L2;
6227 *unused_tuple &= ~BIT(INNER_L2_RSV);
6230 case IPV6_USER_FLOW:
6231 info->layer = HCLGE_FD_USER_DEF_L3;
6232 *unused_tuple &= ~BIT(INNER_L3_RSV);
6238 info->layer = HCLGE_FD_USER_DEF_L4;
6239 *unused_tuple &= ~BIT(INNER_L4_RSV);
6248 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6250 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6253 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6254 struct ethtool_rx_flow_spec *fs,
6256 struct hclge_fd_user_def_info *info)
6258 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6259 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6260 u16 data, offset, data_mask, offset_mask;
6263 info->layer = HCLGE_FD_USER_DEF_NONE;
6264 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6266 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6269 /* user-def data from ethtool is a 64 bit value; bits 0~15 are used
6270 * for the data, and bits 32~47 for the offset.
6272 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6273 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6274 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6275 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6277 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6278 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6282 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6283 dev_err(&hdev->pdev->dev,
6284 "user-def offset[%u] should be no more than %u\n",
6285 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6289 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6290 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6294 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6296 dev_err(&hdev->pdev->dev,
6297 "unsupported flow type for user-def bytes, ret = %d\n",
6303 info->data_mask = data_mask;
6304 info->offset = offset;
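/* Validate an ethtool flow spec: the location range, the user-def
 * field, the flow-type specific tuples, and finally the
 * FLOW_EXT/FLOW_MAC_EXT extensions; *unused_tuple collects the tuple
 * bits the rule does not use.
 */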
6309 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6310 struct ethtool_rx_flow_spec *fs,
6312 struct hclge_fd_user_def_info *info)
6317 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6318 dev_err(&hdev->pdev->dev,
6319 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6321 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6325 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6329 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6330 switch (flow_type) {
6334 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6338 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6344 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6347 case IPV6_USER_FLOW:
6348 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6352 if (hdev->fd_cfg.fd_mode !=
6353 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6354 dev_err(&hdev->pdev->dev,
6355 "ETHER_FLOW is not supported in current fd mode!\n");
6359 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6363 dev_err(&hdev->pdev->dev,
6364 "unsupported protocol type, protocol type = %#x\n",
6370 dev_err(&hdev->pdev->dev,
6371 "failed to check flow union tuple, ret = %d\n",
6376 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6379 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6380 struct ethtool_rx_flow_spec *fs,
6381 struct hclge_fd_rule *rule, u8 ip_proto)
6383 rule->tuples.src_ip[IPV4_INDEX] =
6384 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6385 rule->tuples_mask.src_ip[IPV4_INDEX] =
6386 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6388 rule->tuples.dst_ip[IPV4_INDEX] =
6389 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6390 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6391 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6393 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6394 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6396 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6397 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6399 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6400 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6402 rule->tuples.ether_proto = ETH_P_IP;
6403 rule->tuples_mask.ether_proto = 0xFFFF;
6405 rule->tuples.ip_proto = ip_proto;
6406 rule->tuples_mask.ip_proto = 0xFF;
6409 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6410 struct ethtool_rx_flow_spec *fs,
6411 struct hclge_fd_rule *rule)
6413 rule->tuples.src_ip[IPV4_INDEX] =
6414 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6415 rule->tuples_mask.src_ip[IPV4_INDEX] =
6416 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6418 rule->tuples.dst_ip[IPV4_INDEX] =
6419 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6420 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6421 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6423 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6424 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6426 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6427 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6429 rule->tuples.ether_proto = ETH_P_IP;
6430 rule->tuples_mask.ether_proto = 0xFFFF;
6433 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6434 struct ethtool_rx_flow_spec *fs,
6435 struct hclge_fd_rule *rule, u8 ip_proto)
6437 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6439 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6442 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6444 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6447 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6448 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6450 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6451 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6453 rule->tuples.ether_proto = ETH_P_IPV6;
6454 rule->tuples_mask.ether_proto = 0xFFFF;
6456 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6457 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6459 rule->tuples.ip_proto = ip_proto;
6460 rule->tuples_mask.ip_proto = 0xFF;
6463 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6464 struct ethtool_rx_flow_spec *fs,
6465 struct hclge_fd_rule *rule)
6467 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6469 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6472 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6474 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6477 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6478 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6480 rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6481 rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6483 rule->tuples.ether_proto = ETH_P_IPV6;
6484 rule->tuples_mask.ether_proto = 0xFFFF;
6487 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6488 struct ethtool_rx_flow_spec *fs,
6489 struct hclge_fd_rule *rule)
6491 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6492 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6494 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6495 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6497 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6498 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
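/* User-defined data may be matched at L2, L3 or L4. The 16-bit data and
 * mask are used as-is for L2/L3; for L4 they are shifted into the high
 * half of the 32-bit l4_user_def field (the low half is presumably
 * occupied by other key data in hardware).
 */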
6501 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6502 struct hclge_fd_rule *rule)
6504 switch (info->layer) {
6505 case HCLGE_FD_USER_DEF_L2:
6506 rule->tuples.l2_user_def = info->data;
6507 rule->tuples_mask.l2_user_def = info->data_mask;
6509 case HCLGE_FD_USER_DEF_L3:
6510 rule->tuples.l3_user_def = info->data;
6511 rule->tuples_mask.l3_user_def = info->data_mask;
6513 case HCLGE_FD_USER_DEF_L4:
6514 rule->tuples.l4_user_def = (u32)info->data << 16;
6515 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6521 rule->ep.user_def = *info;
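/* Dispatch on the base flow type to fill the rule tuples, then apply the
 * optional FLOW_EXT (VLAN tag, user-defined data) and FLOW_MAC_EXT
 * (destination MAC) parts of the spec on top.
 */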
6524 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6525 struct ethtool_rx_flow_spec *fs,
6526 struct hclge_fd_rule *rule,
6527 struct hclge_fd_user_def_info *info)
6529 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6531 switch (flow_type) {
6533 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6536 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6539 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6542 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6545 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6548 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6551 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6553 case IPV6_USER_FLOW:
6554 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6557 hclge_fd_get_ether_tuple(hdev, fs, rule);
6563 if (fs->flow_type & FLOW_EXT) {
6564 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6565 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6566 hclge_fd_get_user_def_tuple(info, rule);
6569 if (fs->flow_type & FLOW_MAC_EXT) {
6570 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6571 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6577 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6578 struct hclge_fd_rule *rule)
6582 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6586 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
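/* Common insertion path for ethtool and tc flower rules: under
 * fd_rule_lock, reject rules that conflict with the active rule type,
 * clear any aRFS rules, write the rule to hardware, and only then
 * record it in the software rule list.
 */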
6589 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6590 struct hclge_fd_rule *rule)
6594 spin_lock_bh(&hdev->fd_rule_lock);
6596 if (hdev->fd_active_type != rule->rule_type &&
6597 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6598 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6599 dev_err(&hdev->pdev->dev,
6600 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6601 rule->rule_type, hdev->fd_active_type);
6602 spin_unlock_bh(&hdev->fd_rule_lock);
6606 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6610 ret = hclge_clear_arfs_rules(hdev);
6614 ret = hclge_fd_config_rule(hdev, rule);
6618 rule->state = HCLGE_FD_ACTIVE;
6619 hdev->fd_active_type = rule->rule_type;
6620 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6623 spin_unlock_bh(&hdev->fd_rule_lock);
6627 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6629 struct hclge_vport *vport = hclge_get_vport(handle);
6630 struct hclge_dev *hdev = vport->back;
6632 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
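/* The ethtool ring_cookie encodes both the target queue and an optional
 * VF id (VF n is passed as n + 1, 0 meaning the PF), or the special
 * value RX_CLS_FLOW_DISC to drop matching packets.
 */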
6635 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6636 u16 *vport_id, u8 *action, u16 *queue_id)
6638 struct hclge_vport *vport = hdev->vport;
6640 if (ring_cookie == RX_CLS_FLOW_DISC) {
6641 *action = HCLGE_FD_ACTION_DROP_PACKET;
6643 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6644 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6647 /* To stay consistent with the user's configuration, subtract 1 when
6648 * printing 'vf', because the vf id from ethtool is increased by 1.
6650 if (vf > hdev->num_req_vfs) {
6651 dev_err(&hdev->pdev->dev,
6652 "Error: vf id (%u) should be less than %u\n",
6653 vf - 1, hdev->num_req_vfs);
6657 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6658 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6660 if (ring >= tqps) {
6661 dev_err(&hdev->pdev->dev,
6662 "Error: queue id (%u) > max tqp num (%u)\n",
6663 ring, tqps - 1);
6667 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6668 *queue_id = ring;
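/* hclge_add_fd_entry() is the ethtool -N/--config-ntuple entry point:
 * validate the spec, parse the ring cookie, then build the rule and
 * insert it through the common path above.
 */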
6674 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6675 struct ethtool_rxnfc *cmd)
6677 struct hclge_vport *vport = hclge_get_vport(handle);
6678 struct hclge_dev *hdev = vport->back;
6679 struct hclge_fd_user_def_info info;
6680 u16 dst_vport_id = 0, q_index = 0;
6681 struct ethtool_rx_flow_spec *fs;
6682 struct hclge_fd_rule *rule;
6687 if (!hnae3_dev_fd_supported(hdev)) {
6688 dev_err(&hdev->pdev->dev,
6689 "flow table director is not supported\n");
6693 if (!hdev->fd_en) {
6694 dev_err(&hdev->pdev->dev,
6695 "please enable flow director first\n");
6699 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6701 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6705 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6710 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6714 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6720 rule->flow_type = fs->flow_type;
6721 rule->location = fs->location;
6722 rule->unused_tuple = unused;
6723 rule->vf_id = dst_vport_id;
6724 rule->queue_id = q_index;
6725 rule->action = action;
6726 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6728 ret = hclge_add_fd_entry_common(hdev, rule);
6735 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6736 struct ethtool_rxnfc *cmd)
6738 struct hclge_vport *vport = hclge_get_vport(handle);
6739 struct hclge_dev *hdev = vport->back;
6740 struct ethtool_rx_flow_spec *fs;
6743 if (!hnae3_dev_fd_supported(hdev))
6746 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6748 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6751 spin_lock_bh(&hdev->fd_rule_lock);
6752 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6753 !test_bit(fs->location, hdev->fd_bmap)) {
6754 dev_err(&hdev->pdev->dev,
6755 "Delete fail, rule %u is inexistent\n", fs->location);
6756 spin_unlock_bh(&hdev->fd_rule_lock);
6760 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6765 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6768 spin_unlock_bh(&hdev->fd_rule_lock);
6772 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6775 struct hclge_fd_rule *rule;
6776 struct hlist_node *node;
6779 if (!hnae3_dev_fd_supported(hdev))
6782 spin_lock_bh(&hdev->fd_rule_lock);
6784 for_each_set_bit(location, hdev->fd_bmap,
6785 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6786 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6790 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6792 hlist_del(&rule->rule_node);
6795 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6796 hdev->hclge_fd_rule_num = 0;
6797 bitmap_zero(hdev->fd_bmap,
6798 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6801 spin_unlock_bh(&hdev->fd_rule_lock);
6804 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6806 hclge_clear_fd_rules_in_list(hdev, true);
6807 hclge_fd_disable_user_def(hdev);
6810 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6812 struct hclge_vport *vport = hclge_get_vport(handle);
6813 struct hclge_dev *hdev = vport->back;
6814 struct hclge_fd_rule *rule;
6815 struct hlist_node *node;
6817 /* Return ok here, because reset error handling will check this
6818 * return value. If error is returned here, the reset process will
6819 * fail.
6820 */
6821 if (!hnae3_dev_fd_supported(hdev))
6824 /* if fd is disabled, it should not be restored during reset */
6828 spin_lock_bh(&hdev->fd_rule_lock);
6829 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6830 if (rule->state == HCLGE_FD_ACTIVE)
6831 rule->state = HCLGE_FD_TO_ADD;
6833 spin_unlock_bh(&hdev->fd_rule_lock);
6834 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6839 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6840 struct ethtool_rxnfc *cmd)
6842 struct hclge_vport *vport = hclge_get_vport(handle);
6843 struct hclge_dev *hdev = vport->back;
6845 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6848 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6849 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
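/* The hclge_fd_get_*_info() helpers below are the inverse of the
 * hclge_fd_get_*_tuple() helpers: they convert a stored rule back into
 * an ethtool flow spec, reporting a zero mask for any tuple flagged as
 * unused.
 */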
6854 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6855 struct ethtool_tcpip4_spec *spec,
6856 struct ethtool_tcpip4_spec *spec_mask)
6858 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6859 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6860 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6862 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6863 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6864 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6866 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6867 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6868 0 : cpu_to_be16(rule->tuples_mask.src_port);
6870 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6871 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6872 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6874 spec->tos = rule->tuples.ip_tos;
6875 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6876 0 : rule->tuples_mask.ip_tos;
6879 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6880 struct ethtool_usrip4_spec *spec,
6881 struct ethtool_usrip4_spec *spec_mask)
6883 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6884 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6885 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6887 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6888 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6889 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6891 spec->tos = rule->tuples.ip_tos;
6892 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6893 0 : rule->tuples_mask.ip_tos;
6895 spec->proto = rule->tuples.ip_proto;
6896 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6897 0 : rule->tuples_mask.ip_proto;
6899 spec->ip_ver = ETH_RX_NFC_IP4;
6902 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6903 struct ethtool_tcpip6_spec *spec,
6904 struct ethtool_tcpip6_spec *spec_mask)
6906 cpu_to_be32_array(spec->ip6src,
6907 rule->tuples.src_ip, IPV6_SIZE);
6908 cpu_to_be32_array(spec->ip6dst,
6909 rule->tuples.dst_ip, IPV6_SIZE);
6910 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6911 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6913 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6916 if (rule->unused_tuple & BIT(INNER_DST_IP))
6917 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6919 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6922 spec->tclass = rule->tuples.ip_tos;
6923 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6924 0 : rule->tuples_mask.ip_tos;
6926 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6927 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6928 0 : cpu_to_be16(rule->tuples_mask.src_port);
6930 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6931 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6932 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6935 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6936 struct ethtool_usrip6_spec *spec,
6937 struct ethtool_usrip6_spec *spec_mask)
6939 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6940 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6941 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6942 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6944 cpu_to_be32_array(spec_mask->ip6src,
6945 rule->tuples_mask.src_ip, IPV6_SIZE);
6947 if (rule->unused_tuple & BIT(INNER_DST_IP))
6948 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6950 cpu_to_be32_array(spec_mask->ip6dst,
6951 rule->tuples_mask.dst_ip, IPV6_SIZE);
6953 spec->tclass = rule->tuples.ip_tos;
6954 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6955 0 : rule->tuples_mask.ip_tos;
6957 spec->l4_proto = rule->tuples.ip_proto;
6958 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6959 0 : rule->tuples_mask.ip_proto;
6962 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6963 struct ethhdr *spec,
6964 struct ethhdr *spec_mask)
6966 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6967 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6969 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6970 eth_zero_addr(spec_mask->h_source);
6972 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6974 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6975 eth_zero_addr(spec_mask->h_dest);
6977 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6979 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6980 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6981 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6984 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6985 struct hclge_fd_rule *rule)
6987 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6988 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6989 fs->h_ext.data[0] = 0;
6990 fs->h_ext.data[1] = 0;
6991 fs->m_ext.data[0] = 0;
6992 fs->m_ext.data[1] = 0;
6994 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6995 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6996 fs->m_ext.data[0] =
6997 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6998 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
7002 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
7003 struct hclge_fd_rule *rule)
7005 if (fs->flow_type & FLOW_EXT) {
7006 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
7007 fs->m_ext.vlan_tci =
7008 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
7009 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
7011 hclge_fd_get_user_def_info(fs, rule);
7014 if (fs->flow_type & FLOW_MAC_EXT) {
7015 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
7016 if (rule->unused_tuple & BIT(INNER_DST_MAC))
7017 eth_zero_addr(fs->m_ext.h_dest);
7018 else
7019 ether_addr_copy(fs->m_ext.h_dest,
7020 rule->tuples_mask.dst_mac);
7024 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7025 struct ethtool_rxnfc *cmd)
7027 struct hclge_vport *vport = hclge_get_vport(handle);
7028 struct hclge_fd_rule *rule = NULL;
7029 struct hclge_dev *hdev = vport->back;
7030 struct ethtool_rx_flow_spec *fs;
7031 struct hlist_node *node2;
7033 if (!hnae3_dev_fd_supported(hdev))
7036 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7038 spin_lock_bh(&hdev->fd_rule_lock);
7040 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7041 if (rule->location >= fs->location)
7045 if (!rule || fs->location != rule->location) {
7046 spin_unlock_bh(&hdev->fd_rule_lock);
7051 fs->flow_type = rule->flow_type;
7052 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7056 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7057 &fs->m_u.tcp_ip4_spec);
7060 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7061 &fs->m_u.usr_ip4_spec);
7066 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7067 &fs->m_u.tcp_ip6_spec);
7069 case IPV6_USER_FLOW:
7070 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7071 &fs->m_u.usr_ip6_spec);
7073 /* The flow type of the fd rule has been checked before adding it to the
7074 * rule list. As other flow types have been handled, it must be ETHER_FLOW
7075 * for the default case
7078 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7079 &fs->m_u.ether_spec);
7083 hclge_fd_get_ext_info(fs, rule);
7085 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7086 fs->ring_cookie = RX_CLS_FLOW_DISC;
7090 fs->ring_cookie = rule->queue_id;
7091 vf_id = rule->vf_id;
7092 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7093 fs->ring_cookie |= vf_id;
7096 spin_unlock_bh(&hdev->fd_rule_lock);
7101 static int hclge_get_all_rules(struct hnae3_handle *handle,
7102 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7104 struct hclge_vport *vport = hclge_get_vport(handle);
7105 struct hclge_dev *hdev = vport->back;
7106 struct hclge_fd_rule *rule;
7107 struct hlist_node *node2;
7110 if (!hnae3_dev_fd_supported(hdev))
7113 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7115 spin_lock_bh(&hdev->fd_rule_lock);
7116 hlist_for_each_entry_safe(rule, node2,
7117 &hdev->fd_rule_list, rule_node) {
7118 if (cnt == cmd->rule_cnt) {
7119 spin_unlock_bh(&hdev->fd_rule_lock);
7123 if (rule->state == HCLGE_FD_TO_DEL)
7126 rule_locs[cnt] = rule->location;
7130 spin_unlock_bh(&hdev->fd_rule_lock);
7132 cmd->rule_cnt = cnt;
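/* aRFS support: rules are derived from dissected flow keys rather than
 * an ethtool spec, and are aged out again by hclge_rfs_filter_expire().
 */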
7137 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7138 struct hclge_fd_rule_tuples *tuples)
7140 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7141 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7143 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7144 tuples->ip_proto = fkeys->basic.ip_proto;
7145 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7147 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7148 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7149 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7153 for (i = 0; i < IPV6_SIZE; i++) {
7154 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7155 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7160 /* traverse all rules, check whether an existing rule has the same tuples */
7161 static struct hclge_fd_rule *
7162 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7163 const struct hclge_fd_rule_tuples *tuples)
7165 struct hclge_fd_rule *rule = NULL;
7166 struct hlist_node *node;
7168 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7169 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7176 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7177 struct hclge_fd_rule *rule)
7179 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7180 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7181 BIT(INNER_SRC_PORT);
7184 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7185 rule->state = HCLGE_FD_TO_ADD;
7186 if (tuples->ether_proto == ETH_P_IP) {
7187 if (tuples->ip_proto == IPPROTO_TCP)
7188 rule->flow_type = TCP_V4_FLOW;
7190 rule->flow_type = UDP_V4_FLOW;
7192 if (tuples->ip_proto == IPPROTO_TCP)
7193 rule->flow_type = TCP_V6_FLOW;
7195 rule->flow_type = UDP_V6_FLOW;
7197 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7198 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
7201 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7202 u16 flow_id, struct flow_keys *fkeys)
7204 struct hclge_vport *vport = hclge_get_vport(handle);
7205 struct hclge_fd_rule_tuples new_tuples = {};
7206 struct hclge_dev *hdev = vport->back;
7207 struct hclge_fd_rule *rule;
7210 if (!hnae3_dev_fd_supported(hdev))
7213 /* when there is already an fd rule added by the user,
7214 * arfs should not work
7216 spin_lock_bh(&hdev->fd_rule_lock);
7217 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7218 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7219 spin_unlock_bh(&hdev->fd_rule_lock);
7223 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7225 /* check whether a flow director filter exists for this flow,
7226 * if not, create a new filter for it;
7227 * if a filter exists with a different queue id, modify the filter;
7228 * if a filter exists with the same queue id, do nothing
7230 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7232 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7233 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7234 spin_unlock_bh(&hdev->fd_rule_lock);
7238 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7240 spin_unlock_bh(&hdev->fd_rule_lock);
7244 rule->location = bit_id;
7245 rule->arfs.flow_id = flow_id;
7246 rule->queue_id = queue_id;
7247 hclge_fd_build_arfs_rule(&new_tuples, rule);
7248 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7249 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7250 } else if (rule->queue_id != queue_id) {
7251 rule->queue_id = queue_id;
7252 rule->state = HCLGE_FD_TO_ADD;
7253 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7254 hclge_task_schedule(hdev, 0);
7256 spin_unlock_bh(&hdev->fd_rule_lock);
7257 return rule->location;
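/* Periodically scan the aRFS rules and mark expired flows for deletion;
 * the actual hardware removal is performed later by the service task.
 */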
7260 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7262 #ifdef CONFIG_RFS_ACCEL
7263 struct hnae3_handle *handle = &hdev->vport[0].nic;
7264 struct hclge_fd_rule *rule;
7265 struct hlist_node *node;
7267 spin_lock_bh(&hdev->fd_rule_lock);
7268 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7269 spin_unlock_bh(&hdev->fd_rule_lock);
7272 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7273 if (rule->state != HCLGE_FD_ACTIVE)
7275 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7276 rule->arfs.flow_id, rule->location)) {
7277 rule->state = HCLGE_FD_TO_DEL;
7278 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7281 spin_unlock_bh(&hdev->fd_rule_lock);
7285 /* must be called with fd_rule_lock held */
7286 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7288 #ifdef CONFIG_RFS_ACCEL
7289 struct hclge_fd_rule *rule;
7290 struct hlist_node *node;
7293 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7296 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7297 switch (rule->state) {
7298 case HCLGE_FD_TO_DEL:
7299 case HCLGE_FD_ACTIVE:
7300 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7301 rule->location, NULL, false);
7305 case HCLGE_FD_TO_ADD:
7306 hclge_fd_dec_rule_cnt(hdev, rule->location);
7307 hlist_del(&rule->rule_node);
7314 hclge_sync_fd_state(hdev);
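/* tc flower offload: the hclge_get_cls_key_*() helpers below translate
 * flow dissector matches into rule tuples, flagging any key that is not
 * present in the match as an unused tuple.
 */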
7320 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7321 struct hclge_fd_rule *rule)
7323 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7324 struct flow_match_basic match;
7325 u16 ethtype_key, ethtype_mask;
7327 flow_rule_match_basic(flow, &match);
7328 ethtype_key = ntohs(match.key->n_proto);
7329 ethtype_mask = ntohs(match.mask->n_proto);
7331 if (ethtype_key == ETH_P_ALL) {
7335 rule->tuples.ether_proto = ethtype_key;
7336 rule->tuples_mask.ether_proto = ethtype_mask;
7337 rule->tuples.ip_proto = match.key->ip_proto;
7338 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7340 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7341 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7345 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7346 struct hclge_fd_rule *rule)
7348 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7349 struct flow_match_eth_addrs match;
7351 flow_rule_match_eth_addrs(flow, &match);
7352 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7353 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7354 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7355 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7357 rule->unused_tuple |= BIT(INNER_DST_MAC);
7358 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7362 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7363 struct hclge_fd_rule *rule)
7365 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7366 struct flow_match_vlan match;
7368 flow_rule_match_vlan(flow, &match);
7369 rule->tuples.vlan_tag1 = match.key->vlan_id |
7370 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7371 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7372 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7374 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7378 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7379 struct hclge_fd_rule *rule)
7383 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7384 struct flow_match_control match;
7386 flow_rule_match_control(flow, &match);
7387 addr_type = match.key->addr_type;
7390 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7391 struct flow_match_ipv4_addrs match;
7393 flow_rule_match_ipv4_addrs(flow, &match);
7394 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7395 rule->tuples_mask.src_ip[IPV4_INDEX] =
7396 be32_to_cpu(match.mask->src);
7397 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7398 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7399 be32_to_cpu(match.mask->dst);
7400 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7401 struct flow_match_ipv6_addrs match;
7403 flow_rule_match_ipv6_addrs(flow, &match);
7404 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7406 be32_to_cpu_array(rule->tuples_mask.src_ip,
7407 match.mask->src.s6_addr32, IPV6_SIZE);
7408 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7410 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7411 match.mask->dst.s6_addr32, IPV6_SIZE);
7413 rule->unused_tuple |= BIT(INNER_SRC_IP);
7414 rule->unused_tuple |= BIT(INNER_DST_IP);
7418 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7419 struct hclge_fd_rule *rule)
7421 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7422 struct flow_match_ports match;
7424 flow_rule_match_ports(flow, &match);
7426 rule->tuples.src_port = be16_to_cpu(match.key->src);
7427 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7428 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7429 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7431 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7432 rule->unused_tuple |= BIT(INNER_DST_PORT);
7436 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7437 struct flow_cls_offload *cls_flower,
7438 struct hclge_fd_rule *rule)
7440 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7441 struct flow_dissector *dissector = flow->match.dissector;
7443 if (dissector->used_keys &
7444 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7445 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7446 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7447 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7448 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7449 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7450 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7451 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7452 dissector->used_keys);
7456 hclge_get_cls_key_basic(flow, rule);
7457 hclge_get_cls_key_mac(flow, rule);
7458 hclge_get_cls_key_vlan(flow, rule);
7459 hclge_get_cls_key_ip(flow, rule);
7460 hclge_get_cls_key_port(flow, rule);
7465 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7466 struct flow_cls_offload *cls_flower, int tc)
7468 u32 prio = cls_flower->common.prio;
7470 if (tc < 0 || tc > hdev->tc_max) {
7471 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7475 if (prio == 0 ||
7476 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7477 dev_err(&hdev->pdev->dev,
7478 "prio %u should be in range [1, %u]\n",
7479 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7483 if (test_bit(prio - 1, hdev->fd_bmap)) {
7484 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7490 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7491 struct flow_cls_offload *cls_flower,
7494 struct hclge_vport *vport = hclge_get_vport(handle);
7495 struct hclge_dev *hdev = vport->back;
7496 struct hclge_fd_rule *rule;
7499 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7501 dev_err(&hdev->pdev->dev,
7502 "failed to check cls flower params, ret = %d\n", ret);
7506 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7510 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7516 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7517 rule->cls_flower.tc = tc;
7518 rule->location = cls_flower->common.prio - 1;
7520 rule->cls_flower.cookie = cls_flower->cookie;
7521 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7523 ret = hclge_add_fd_entry_common(hdev, rule);
7530 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7531 unsigned long cookie)
7533 struct hclge_fd_rule *rule;
7534 struct hlist_node *node;
7536 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7537 if (rule->cls_flower.cookie == cookie)
7544 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7545 struct flow_cls_offload *cls_flower)
7547 struct hclge_vport *vport = hclge_get_vport(handle);
7548 struct hclge_dev *hdev = vport->back;
7549 struct hclge_fd_rule *rule;
7552 spin_lock_bh(&hdev->fd_rule_lock);
7554 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7556 spin_unlock_bh(&hdev->fd_rule_lock);
7560 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7563 spin_unlock_bh(&hdev->fd_rule_lock);
7567 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7568 spin_unlock_bh(&hdev->fd_rule_lock);
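/* Service-task worker: push rules in HCLGE_FD_TO_ADD state to hardware
 * and remove those in HCLGE_FD_TO_DEL, re-arming the FD_TBL_CHANGED
 * flag on failure so that the operation is retried later.
 */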
7573 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7575 struct hclge_fd_rule *rule;
7576 struct hlist_node *node;
7579 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7582 spin_lock_bh(&hdev->fd_rule_lock);
7584 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7585 switch (rule->state) {
7586 case HCLGE_FD_TO_ADD:
7587 ret = hclge_fd_config_rule(hdev, rule);
7590 rule->state = HCLGE_FD_ACTIVE;
7592 case HCLGE_FD_TO_DEL:
7593 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7594 rule->location, NULL, false);
7597 hclge_fd_dec_rule_cnt(hdev, rule->location);
7598 hclge_fd_free_node(hdev, rule);
7607 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7609 spin_unlock_bh(&hdev->fd_rule_lock);
7612 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7614 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7615 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7617 hclge_clear_fd_rules_in_list(hdev, clear_list);
7620 hclge_sync_fd_user_def_cfg(hdev, false);
7622 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7625 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7627 struct hclge_vport *vport = hclge_get_vport(handle);
7628 struct hclge_dev *hdev = vport->back;
7630 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7631 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7634 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7636 struct hclge_vport *vport = hclge_get_vport(handle);
7637 struct hclge_dev *hdev = vport->back;
7639 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7642 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7644 struct hclge_vport *vport = hclge_get_vport(handle);
7645 struct hclge_dev *hdev = vport->back;
7647 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7650 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7652 struct hclge_vport *vport = hclge_get_vport(handle);
7653 struct hclge_dev *hdev = vport->back;
7655 return hdev->rst_stats.hw_reset_done_cnt;
7658 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7660 struct hclge_vport *vport = hclge_get_vport(handle);
7661 struct hclge_dev *hdev = vport->back;
7663 hdev->fd_en = enable;
7666 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7668 hclge_restore_fd_entries(handle);
7670 hclge_task_schedule(hdev, 0);
7673 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7675 struct hclge_desc desc;
7676 struct hclge_config_mac_mode_cmd *req =
7677 (struct hclge_config_mac_mode_cmd *)desc.data;
7681 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7684 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7685 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7686 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7687 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7688 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7689 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7690 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7691 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7692 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7693 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7696 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7698 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7700 dev_err(&hdev->pdev->dev,
7701 "mac enable fail, ret =%d.\n", ret);
7704 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7705 u8 switch_param, u8 param_mask)
7707 struct hclge_mac_vlan_switch_cmd *req;
7708 struct hclge_desc desc;
7712 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7713 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7715 /* read current config parameter */
7716 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7718 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7719 req->func_id = cpu_to_le32(func_id);
7721 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7723 dev_err(&hdev->pdev->dev,
7724 "read mac vlan switch parameter fail, ret = %d\n", ret);
7728 /* modify and write new config parameter */
7729 hclge_cmd_reuse_desc(&desc, false);
7730 req->switch_param = (req->switch_param & param_mask) | switch_param;
7731 req->param_mask = param_mask;
7733 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7735 dev_err(&hdev->pdev->dev,
7736 "set mac vlan switch parameter fail, ret = %d\n", ret);
7740 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7743 #define HCLGE_PHY_LINK_STATUS_NUM 200
7745 struct phy_device *phydev = hdev->hw.mac.phydev;
7750 ret = phy_read_status(phydev);
7752 dev_err(&hdev->pdev->dev,
7753 "phy update link status fail, ret = %d\n", ret);
7757 if (phydev->link == link_ret)
7760 msleep(HCLGE_LINK_STATUS_MS);
7761 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7764 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7766 #define HCLGE_MAC_LINK_STATUS_NUM 100
7773 ret = hclge_get_mac_link_status(hdev, &link_status);
7776 if (link_status == link_ret)
7779 msleep(HCLGE_LINK_STATUS_MS);
7780 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7784 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7789 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7792 hclge_phy_link_status_wait(hdev, link_ret);
7794 return hclge_mac_link_status_wait(hdev, link_ret);
7797 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7799 struct hclge_config_mac_mode_cmd *req;
7800 struct hclge_desc desc;
7804 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7805 /* 1. Read out the MAC mode config first */
7806 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7807 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7809 dev_err(&hdev->pdev->dev,
7810 "mac loopback get fail, ret =%d.\n", ret);
7814 /* 2. Then set up the loopback flag */
7815 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7816 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7818 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7820 /* 3. Config the MAC work mode with the loopback flag
7821 * and its original configuration parameters
7823 hclge_cmd_reuse_desc(&desc, false);
7824 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7826 dev_err(&hdev->pdev->dev,
7827 "mac loopback set fail, ret =%d.\n", ret);
7831 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7832 enum hnae3_loop loop_mode)
7834 #define HCLGE_COMMON_LB_RETRY_MS 10
7835 #define HCLGE_COMMON_LB_RETRY_NUM 100
7837 struct hclge_common_lb_cmd *req;
7838 struct hclge_desc desc;
7842 req = (struct hclge_common_lb_cmd *)desc.data;
7843 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7845 switch (loop_mode) {
7846 case HNAE3_LOOP_SERIAL_SERDES:
7847 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7849 case HNAE3_LOOP_PARALLEL_SERDES:
7850 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7852 case HNAE3_LOOP_PHY:
7853 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7856 dev_err(&hdev->pdev->dev,
7857 "unsupported common loopback mode %d\n", loop_mode);
7862 req->enable = loop_mode_b;
7863 req->mask = loop_mode_b;
7865 req->mask = loop_mode_b;
7868 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7870 dev_err(&hdev->pdev->dev,
7871 "common loopback set fail, ret = %d\n", ret);
7876 msleep(HCLGE_COMMON_LB_RETRY_MS);
7877 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7879 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7881 dev_err(&hdev->pdev->dev,
7882 "common loopback get, ret = %d\n", ret);
7885 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7886 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7888 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7889 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7891 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7892 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
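/* Enable or disable serdes loopback via firmware, then wait for the MAC
 * link status to match the requested state before the selftest starts
 * sending packets.
 */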
7898 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7899 enum hnae3_loop loop_mode)
7903 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7907 hclge_cfg_mac_mode(hdev, en);
7909 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7911 dev_err(&hdev->pdev->dev,
7912 "serdes loopback config mac mode timeout\n");
7917 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7918 struct phy_device *phydev)
7922 if (!phydev->suspended) {
7923 ret = phy_suspend(phydev);
7928 ret = phy_resume(phydev);
7932 return phy_loopback(phydev, true);
7935 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7936 struct phy_device *phydev)
7940 ret = phy_loopback(phydev, false);
7944 return phy_suspend(phydev);
7947 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7949 struct phy_device *phydev = hdev->hw.mac.phydev;
7953 if (hnae3_dev_phy_imp_supported(hdev))
7954 return hclge_set_common_loopback(hdev, en,
7960 ret = hclge_enable_phy_loopback(hdev, phydev);
7962 ret = hclge_disable_phy_loopback(hdev, phydev);
7964 dev_err(&hdev->pdev->dev,
7965 "set phy loopback fail, ret = %d\n", ret);
7969 hclge_cfg_mac_mode(hdev, en);
7971 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7973 dev_err(&hdev->pdev->dev,
7974 "phy loopback config mac mode timeout\n");
7979 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7980 u16 stream_id, bool enable)
7982 struct hclge_desc desc;
7983 struct hclge_cfg_com_tqp_queue_cmd *req =
7984 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7986 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7987 req->tqp_id = cpu_to_le16(tqp_id);
7988 req->stream_id = cpu_to_le16(stream_id);
7990 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7992 return hclge_cmd_send(&hdev->hw, &desc, 1);
7995 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7997 struct hclge_vport *vport = hclge_get_vport(handle);
7998 struct hclge_dev *hdev = vport->back;
8002 for (i = 0; i < handle->kinfo.num_tqps; i++) {
8003 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
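/* hclge_set_loopback() dispatches to the APP (MAC), serdes or phy
 * loopback implementation, then enables the TQPs so the selftest can
 * actually pass traffic.
 */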
8010 static int hclge_set_loopback(struct hnae3_handle *handle,
8011 enum hnae3_loop loop_mode, bool en)
8013 struct hclge_vport *vport = hclge_get_vport(handle);
8014 struct hclge_dev *hdev = vport->back;
8017 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
8018 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
8019 * the same, the packets are looped back in the SSU. If SSU loopback
8020 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8022 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8023 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8025 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8026 HCLGE_SWITCH_ALW_LPBK_MASK);
8031 switch (loop_mode) {
8032 case HNAE3_LOOP_APP:
8033 ret = hclge_set_app_loopback(hdev, en);
8035 case HNAE3_LOOP_SERIAL_SERDES:
8036 case HNAE3_LOOP_PARALLEL_SERDES:
8037 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8039 case HNAE3_LOOP_PHY:
8040 ret = hclge_set_phy_loopback(hdev, en);
8044 dev_err(&hdev->pdev->dev,
8045 "loop_mode %d is not supported\n", loop_mode);
8052 ret = hclge_tqp_enable(handle, en);
8054 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8055 en ? "enable" : "disable", ret);
8060 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8064 ret = hclge_set_app_loopback(hdev, false);
8068 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8072 return hclge_cfg_common_loopback(hdev, false,
8073 HNAE3_LOOP_PARALLEL_SERDES);
8076 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8078 struct hclge_vport *vport = hclge_get_vport(handle);
8079 struct hnae3_knic_private_info *kinfo;
8080 struct hnae3_queue *queue;
8081 struct hclge_tqp *tqp;
8084 kinfo = &vport->nic.kinfo;
8085 for (i = 0; i < kinfo->num_tqps; i++) {
8086 queue = handle->kinfo.tqp[i];
8087 tqp = container_of(queue, struct hclge_tqp, q);
8088 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8092 static void hclge_flush_link_update(struct hclge_dev *hdev)
8094 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8096 unsigned long last = hdev->serv_processed_cnt;
8099 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8100 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8101 last == hdev->serv_processed_cnt)
8105 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8107 struct hclge_vport *vport = hclge_get_vport(handle);
8108 struct hclge_dev *hdev = vport->back;
8111 hclge_task_schedule(hdev, 0);
8113 /* Set the DOWN flag here to disable link updating */
8114 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8116 /* flush memory to make sure DOWN is seen by service task */
8117 smp_mb__before_atomic();
8118 hclge_flush_link_update(hdev);
8122 static int hclge_ae_start(struct hnae3_handle *handle)
8124 struct hclge_vport *vport = hclge_get_vport(handle);
8125 struct hclge_dev *hdev = vport->back;
8128 hclge_cfg_mac_mode(hdev, true);
8129 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8130 hdev->hw.mac.link = 0;
8132 /* reset tqp stats */
8133 hclge_reset_tqp_stats(handle);
8135 hclge_mac_start_phy(hdev);
8140 static void hclge_ae_stop(struct hnae3_handle *handle)
8142 struct hclge_vport *vport = hclge_get_vport(handle);
8143 struct hclge_dev *hdev = vport->back;
8145 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8146 spin_lock_bh(&hdev->fd_rule_lock);
8147 hclge_clear_arfs_rules(hdev);
8148 spin_unlock_bh(&hdev->fd_rule_lock);
8150 /* If it is not a PF reset or FLR, the firmware will disable the MAC,
8151 * so it only needs to stop the phy here.
8153 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8154 hdev->reset_type != HNAE3_FUNC_RESET &&
8155 hdev->reset_type != HNAE3_FLR_RESET) {
8156 hclge_mac_stop_phy(hdev);
8157 hclge_update_link_status(hdev);
8161 hclge_reset_tqp(handle);
8163 hclge_config_mac_tnl_int(hdev, false);
8166 hclge_cfg_mac_mode(hdev, false);
8168 hclge_mac_stop_phy(hdev);
8170 /* reset tqp stats */
8171 hclge_reset_tqp_stats(handle);
8172 hclge_update_link_status(hdev);
8175 int hclge_vport_start(struct hclge_vport *vport)
8177 struct hclge_dev *hdev = vport->back;
8179 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8180 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8181 vport->last_active_jiffies = jiffies;
8183 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8184 if (vport->vport_id) {
8185 hclge_restore_mac_table_common(vport);
8186 hclge_restore_vport_vlan_table(vport);
8188 hclge_restore_hw_table(hdev);
8192 clear_bit(vport->vport_id, hdev->vport_config_block);
8197 void hclge_vport_stop(struct hclge_vport *vport)
8199 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8202 static int hclge_client_start(struct hnae3_handle *handle)
8204 struct hclge_vport *vport = hclge_get_vport(handle);
8206 return hclge_vport_start(vport);
8209 static void hclge_client_stop(struct hnae3_handle *handle)
8211 struct hclge_vport *vport = hclge_get_vport(handle);
8213 hclge_vport_stop(vport);
8216 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8217 u16 cmdq_resp, u8 resp_code,
8218 enum hclge_mac_vlan_tbl_opcode op)
8220 struct hclge_dev *hdev = vport->back;
8223 dev_err(&hdev->pdev->dev,
8224 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8229 if (op == HCLGE_MAC_VLAN_ADD) {
8230 if (!resp_code || resp_code == 1)
8232 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8233 resp_code == HCLGE_ADD_MC_OVERFLOW)
8236 dev_err(&hdev->pdev->dev,
8237 "add mac addr failed for undefined, code=%u.\n",
8240 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8243 } else if (resp_code == 1) {
8244 dev_dbg(&hdev->pdev->dev,
8245 "remove mac addr failed for miss.\n");
8249 dev_err(&hdev->pdev->dev,
8250 "remove mac addr failed for undefined, code=%u.\n",
8253 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8256 } else if (resp_code == 1) {
8257 dev_dbg(&hdev->pdev->dev,
8258 "lookup mac addr failed for miss.\n");
8262 dev_err(&hdev->pdev->dev,
8263 "lookup mac addr failed for undefined, code=%u.\n",
8268 dev_err(&hdev->pdev->dev,
8269 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8274 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8276 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8278 unsigned int word_num;
8279 unsigned int bit_num;
8281 if (vfid > 255 || vfid < 0)
8284 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8285 word_num = vfid / 32;
8286 bit_num = vfid % 32;
8288 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8290 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8292 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8293 bit_num = vfid % 32;
8295 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8297 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8303 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8305 #define HCLGE_DESC_NUMBER 3
8306 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8309 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8310 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8311 if (desc[i].data[j])
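/* Pack a MAC address into the table entry layout: bytes 0-3 form the
 * 32-bit high word (byte 0 in the least significant position) and
 * bytes 4-5 the 16-bit low word, both stored little-endian. Multicast
 * entries additionally set the entry-type and mc_mac_en bits.
 */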
8317 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8318 const u8 *addr, bool is_mc)
8320 const unsigned char *mac_addr = addr;
8321 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8322 (mac_addr[0]) | (mac_addr[1] << 8);
8323 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8325 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8327 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8328 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8331 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8332 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8335 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8336 struct hclge_mac_vlan_tbl_entry_cmd *req)
8338 struct hclge_dev *hdev = vport->back;
8339 struct hclge_desc desc;
8344 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8346 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8348 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8350 dev_err(&hdev->pdev->dev,
8351 "del mac addr failed for cmd_send, ret =%d.\n",
8355 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8356 retval = le16_to_cpu(desc.retval);
8358 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8359 HCLGE_MAC_VLAN_REMOVE);
8362 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8363 struct hclge_mac_vlan_tbl_entry_cmd *req,
8364 struct hclge_desc *desc,
8367 struct hclge_dev *hdev = vport->back;
8372 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8374 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8375 memcpy(desc[0].data,
8377 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8378 hclge_cmd_setup_basic_desc(&desc[1],
8379 HCLGE_OPC_MAC_VLAN_ADD,
8381 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8382 hclge_cmd_setup_basic_desc(&desc[2],
8383 HCLGE_OPC_MAC_VLAN_ADD,
8385 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8387 memcpy(desc[0].data,
8389 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8390 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8393 dev_err(&hdev->pdev->dev,
8394 "lookup mac addr failed for cmd_send, ret =%d.\n",
8398 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8399 retval = le16_to_cpu(desc[0].retval);
8401 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8402 HCLGE_MAC_VLAN_LKUP);
8405 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8406 struct hclge_mac_vlan_tbl_entry_cmd *req,
8407 struct hclge_desc *mc_desc)
8409 struct hclge_dev *hdev = vport->back;
8416 struct hclge_desc desc;
8418 hclge_cmd_setup_basic_desc(&desc,
8419 HCLGE_OPC_MAC_VLAN_ADD,
8421 memcpy(desc.data, req,
8422 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8423 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8424 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8425 retval = le16_to_cpu(desc.retval);
8427 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8429 HCLGE_MAC_VLAN_ADD);
8431 hclge_cmd_reuse_desc(&mc_desc[0], false);
8432 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8433 hclge_cmd_reuse_desc(&mc_desc[1], false);
8434 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8435 hclge_cmd_reuse_desc(&mc_desc[2], false);
8436 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8437 memcpy(mc_desc[0].data, req,
8438 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8439 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8440 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8441 retval = le16_to_cpu(mc_desc[0].retval);
8443 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8445 HCLGE_MAC_VLAN_ADD);
8449 dev_err(&hdev->pdev->dev,
8450 "add mac addr failed for cmd_send, ret =%d.\n",
8458 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8459 u16 *allocated_size)
8461 struct hclge_umv_spc_alc_cmd *req;
8462 struct hclge_desc desc;
8465 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8466 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8468 req->space_size = cpu_to_le32(space_size);
8470 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8472 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8477 *allocated_size = le32_to_cpu(desc.data[1]);
8482 static int hclge_init_umv_space(struct hclge_dev *hdev)
8484 u16 allocated_size = 0;
8487 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8491 if (allocated_size < hdev->wanted_umv_size)
8492 dev_warn(&hdev->pdev->dev,
8493 "failed to alloc umv space, want %u, get %u\n",
8494 hdev->wanted_umv_size, allocated_size);
8496 hdev->max_umv_size = allocated_size;
8497 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8498 hdev->share_umv_size = hdev->priv_umv_size +
8499 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8504 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8506 struct hclge_vport *vport;
8509 for (i = 0; i < hdev->num_alloc_vport; i++) {
8510 vport = &hdev->vport[i];
8511 vport->used_umv_num = 0;
8514 mutex_lock(&hdev->vport_lock);
8515 hdev->share_umv_size = hdev->priv_umv_size +
8516 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8517 mutex_unlock(&hdev->vport_lock);
8520 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8522 struct hclge_dev *hdev = vport->back;
8526 mutex_lock(&hdev->vport_lock);
8528 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8529 hdev->share_umv_size == 0);
8532 mutex_unlock(&hdev->vport_lock);
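/* UMV (unicast MAC vlan) space accounting: a vport consumes its private
 * quota first and then draws from the shared pool; freeing reverses the
 * order. Callers must hold vport_lock while updating the counters.
 */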
8537 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8539 struct hclge_dev *hdev = vport->back;
8542 if (vport->used_umv_num > hdev->priv_umv_size)
8543 hdev->share_umv_size++;
8545 if (vport->used_umv_num > 0)
8546 vport->used_umv_num--;
8548 if (vport->used_umv_num >= hdev->priv_umv_size &&
8549 hdev->share_umv_size > 0)
8550 hdev->share_umv_size--;
8551 vport->used_umv_num++;
8555 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8558 struct hclge_mac_node *mac_node, *tmp;
8560 list_for_each_entry_safe(mac_node, tmp, list, node)
8561 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
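/* State machine for MAC list nodes: TO_ADD and TO_DEL requests cancel
 * each other out, while ACTIVE confirms a previously requested
 * addition. Pending TO_ADD/TO_DEL nodes are later synced to hardware.
 */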
8567 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8568 enum HCLGE_MAC_NODE_STATE state)
8571 /* from set_rx_mode or tmp_add_list */
8572 case HCLGE_MAC_TO_ADD:
8573 if (mac_node->state == HCLGE_MAC_TO_DEL)
8574 mac_node->state = HCLGE_MAC_ACTIVE;
8576 /* only from set_rx_mode */
8577 case HCLGE_MAC_TO_DEL:
8578 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8579 list_del(&mac_node->node);
8582 mac_node->state = HCLGE_MAC_TO_DEL;
8585 /* only from tmp_add_list, convert a pending addition
8586 * (HCLGE_MAC_TO_ADD) to HCLGE_MAC_ACTIVE
8587 */
8588 case HCLGE_MAC_ACTIVE:
8589 if (mac_node->state == HCLGE_MAC_TO_ADD)
8590 mac_node->state = HCLGE_MAC_ACTIVE;
8596 int hclge_update_mac_list(struct hclge_vport *vport,
8597 enum HCLGE_MAC_NODE_STATE state,
8598 enum HCLGE_MAC_ADDR_TYPE mac_type,
8599 const unsigned char *addr)
8601 struct hclge_dev *hdev = vport->back;
8602 struct hclge_mac_node *mac_node;
8603 struct list_head *list;
8605 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8606 &vport->uc_mac_list : &vport->mc_mac_list;
8608 spin_lock_bh(&vport->mac_list_lock);
8610 /* if the mac addr is already in the mac list, no need to add a new
8611 * one into it; just check the mac addr state, convert it to a new
8612 * state, or remove it, or do nothing.
8614 mac_node = hclge_find_mac_node(list, addr);
8616 hclge_update_mac_node(mac_node, state);
8617 spin_unlock_bh(&vport->mac_list_lock);
8618 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8622 /* if this address was never added, there is no need to delete it */
8623 if (state == HCLGE_MAC_TO_DEL) {
8624 spin_unlock_bh(&vport->mac_list_lock);
8625 dev_err(&hdev->pdev->dev,
8626 "failed to delete address %pM from mac list\n",
8631 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8633 spin_unlock_bh(&vport->mac_list_lock);
8637 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8639 mac_node->state = state;
8640 ether_addr_copy(mac_node->mac_addr, addr);
8641 list_add_tail(&mac_node->node, list);
8643 spin_unlock_bh(&vport->mac_list_lock);
8648 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8649 const unsigned char *addr)
8651 struct hclge_vport *vport = hclge_get_vport(handle);
8653 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8657 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8658 const unsigned char *addr)
8660 struct hclge_dev *hdev = vport->back;
8661 struct hclge_mac_vlan_tbl_entry_cmd req;
8662 struct hclge_desc desc;
8663 u16 egress_port = 0;
8666 /* mac addr check */
8667 if (is_zero_ether_addr(addr) ||
8668 is_broadcast_ether_addr(addr) ||
8669 is_multicast_ether_addr(addr)) {
8670 dev_err(&hdev->pdev->dev,
8671 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8672 addr, is_zero_ether_addr(addr),
8673 is_broadcast_ether_addr(addr),
8674 is_multicast_ether_addr(addr));
8678 memset(&req, 0, sizeof(req));
8680 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8681 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8683 req.egress_port = cpu_to_le16(egress_port);
8685 hclge_prepare_mac_addr(&req, addr, false);
8687 /* Lookup the mac address in the mac_vlan table, and add
8688 * it if the entry does not exist. Duplicate unicast entries
8689 * are not allowed in the mac vlan table.
8691 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8692 if (ret == -ENOENT) {
8693 mutex_lock(&hdev->vport_lock);
8694 if (!hclge_is_umv_space_full(vport, false)) {
8695 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8697 hclge_update_umv_space(vport, false);
8698 mutex_unlock(&hdev->vport_lock);
8701 mutex_unlock(&hdev->vport_lock);
8703 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8704 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8705 hdev->priv_umv_size);
8710 /* check if we just hit the duplicate */
8717 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8718 const unsigned char *addr)
8720 struct hclge_vport *vport = hclge_get_vport(handle);
8722 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8726 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8727 const unsigned char *addr)
8729 struct hclge_dev *hdev = vport->back;
8730 struct hclge_mac_vlan_tbl_entry_cmd req;
8733 /* mac addr check */
8734 if (is_zero_ether_addr(addr) ||
8735 is_broadcast_ether_addr(addr) ||
8736 is_multicast_ether_addr(addr)) {
8737 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8742 memset(&req, 0, sizeof(req));
8743 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8744 hclge_prepare_mac_addr(&req, addr, false);
8745 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8747 mutex_lock(&hdev->vport_lock);
8748 hclge_update_umv_space(vport, true);
8749 mutex_unlock(&hdev->vport_lock);
8750 } else if (ret == -ENOENT) {
8757 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8758 const unsigned char *addr)
8760 struct hclge_vport *vport = hclge_get_vport(handle);
8762 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8766 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8767 const unsigned char *addr)
8769 struct hclge_dev *hdev = vport->back;
8770 struct hclge_mac_vlan_tbl_entry_cmd req;
8771 struct hclge_desc desc[3];
8774 /* mac addr check */
8775 if (!is_multicast_ether_addr(addr)) {
8776 dev_err(&hdev->pdev->dev,
8777 "Add mc mac err! invalid mac:%pM.\n",
8781 memset(&req, 0, sizeof(req));
8782 hclge_prepare_mac_addr(&req, addr, true);
8783 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8785 /* This mac addr do not exist, add new entry for it */
8786 memset(desc[0].data, 0, sizeof(desc[0].data));
8787 memset(desc[1].data, 0, sizeof(desc[0].data));
8788 memset(desc[2].data, 0, sizeof(desc[0].data));
8790 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8793 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8794 /* if already overflow, not to print each time */
8795 if (status == -ENOSPC &&
8796 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8797 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8802 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8803 const unsigned char *addr)
8805 struct hclge_vport *vport = hclge_get_vport(handle);
8807 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8811 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8812 const unsigned char *addr)
8814 struct hclge_dev *hdev = vport->back;
8815 struct hclge_mac_vlan_tbl_entry_cmd req;
8816 enum hclge_cmd_status status;
8817 struct hclge_desc desc[3];
8819 /* mac addr check */
8820 if (!is_multicast_ether_addr(addr)) {
8821 dev_dbg(&hdev->pdev->dev,
8822 "Remove mc mac err! invalid mac:%pM.\n",
8827 memset(&req, 0, sizeof(req));
8828 hclge_prepare_mac_addr(&req, addr, true);
8829 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8831 /* This mac addr exist, remove this handle's VFID for it */
8832 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8836 if (hclge_is_all_function_id_zero(desc))
8837 /* All the vfid is zero, so need to delete this entry */
8838 status = hclge_remove_mac_vlan_tbl(vport, &req);
8840 /* Not all the vfid is zero, update the vfid */
8841 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8842 } else if (status == -ENOENT) {
8849 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8850 struct list_head *list,
8851 int (*sync)(struct hclge_vport *,
8852 const unsigned char *))
8854 struct hclge_mac_node *mac_node, *tmp;
8857 list_for_each_entry_safe(mac_node, tmp, list, node) {
8858 ret = sync(vport, mac_node->mac_addr);
8860 mac_node->state = HCLGE_MAC_ACTIVE;
8862 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8865 /* If one unicast mac address is existing in hardware,
8866 * we need to try whether other unicast mac addresses
8867 * are new addresses that can be added.
8875 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8876 struct list_head *list,
8877 int (*unsync)(struct hclge_vport *,
8878 const unsigned char *))
8880 struct hclge_mac_node *mac_node, *tmp;
8883 list_for_each_entry_safe(mac_node, tmp, list, node) {
8884 ret = unsync(vport, mac_node->mac_addr);
8885 if (!ret || ret == -ENOENT) {
8886 list_del(&mac_node->node);
8889 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8896 static bool hclge_sync_from_add_list(struct list_head *add_list,
8897 struct list_head *mac_list)
8899 struct hclge_mac_node *mac_node, *tmp, *new_node;
8900 bool all_added = true;
8902 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8903 if (mac_node->state == HCLGE_MAC_TO_ADD)
8906 /* if the mac address from tmp_add_list is not in the
8907 * uc/mc_mac_list, it means have received a TO_DEL request
8908 * during the time window of adding the mac address into mac
8909 * table. if mac_node state is ACTIVE, then change it to TO_DEL,
8910 * then it will be removed at next time. else it must be TO_ADD,
8911 * this address hasn't been added into mac table,
8912 * so just remove the mac node.
8914 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8916 hclge_update_mac_node(new_node, mac_node->state);
8917 list_del(&mac_node->node);
8919 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8920 mac_node->state = HCLGE_MAC_TO_DEL;
8921 list_move_tail(&mac_node->node, mac_list);
8923 list_del(&mac_node->node);
8931 static void hclge_sync_from_del_list(struct list_head *del_list,
8932 struct list_head *mac_list)
8934 struct hclge_mac_node *mac_node, *tmp, *new_node;
8936 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8937 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8939 /* If the mac addr exists in the mac list, it means
8940 * received a new TO_ADD request during the time window
8941 * of configuring the mac address. For the mac node
8942 * state is TO_ADD, and the address is already in the
8943 * in the hardware(due to delete fail), so we just need
8944 * to change the mac node state to ACTIVE.
8946 new_node->state = HCLGE_MAC_ACTIVE;
8947 list_del(&mac_node->node);
8950 list_move_tail(&mac_node->node, mac_list);
8955 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8956 enum HCLGE_MAC_ADDR_TYPE mac_type,
8959 if (mac_type == HCLGE_MAC_ADDR_UC) {
8961 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8963 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8966 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8968 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8972 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8973 enum HCLGE_MAC_ADDR_TYPE mac_type)
8975 struct hclge_mac_node *mac_node, *tmp, *new_node;
8976 struct list_head tmp_add_list, tmp_del_list;
8977 struct list_head *list;
8980 INIT_LIST_HEAD(&tmp_add_list);
8981 INIT_LIST_HEAD(&tmp_del_list);
8983 /* move the mac addr to the tmp_add_list and tmp_del_list, then
8984 * we can add/delete these mac addr outside the spin lock
8986 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8987 &vport->uc_mac_list : &vport->mc_mac_list;
8989 spin_lock_bh(&vport->mac_list_lock);
8991 list_for_each_entry_safe(mac_node, tmp, list, node) {
8992 switch (mac_node->state) {
8993 case HCLGE_MAC_TO_DEL:
8994 list_move_tail(&mac_node->node, &tmp_del_list);
8996 case HCLGE_MAC_TO_ADD:
8997 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9000 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
9001 new_node->state = mac_node->state;
9002 list_add_tail(&new_node->node, &tmp_add_list);
9010 spin_unlock_bh(&vport->mac_list_lock);
9012 /* delete first, in order to get max mac table space for adding */
9013 if (mac_type == HCLGE_MAC_ADDR_UC) {
9014 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9015 hclge_rm_uc_addr_common);
9016 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9017 hclge_add_uc_addr_common);
9019 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9020 hclge_rm_mc_addr_common);
9021 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9022 hclge_add_mc_addr_common);
9025 /* if some mac addresses were added/deleted fail, move back to the
9026 * mac_list, and retry at next time.
9028 spin_lock_bh(&vport->mac_list_lock);
9030 hclge_sync_from_del_list(&tmp_del_list, list);
9031 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9033 spin_unlock_bh(&vport->mac_list_lock);
9035 hclge_update_overflow_flags(vport, mac_type, all_added);
9038 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9040 struct hclge_dev *hdev = vport->back;
9042 if (test_bit(vport->vport_id, hdev->vport_config_block))
9045 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9051 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9055 for (i = 0; i < hdev->num_alloc_vport; i++) {
9056 struct hclge_vport *vport = &hdev->vport[i];
9058 if (!hclge_need_sync_mac_table(vport))
9061 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9062 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
9066 static void hclge_build_del_list(struct list_head *list,
9068 struct list_head *tmp_del_list)
9070 struct hclge_mac_node *mac_cfg, *tmp;
9072 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9073 switch (mac_cfg->state) {
9074 case HCLGE_MAC_TO_DEL:
9075 case HCLGE_MAC_ACTIVE:
9076 list_move_tail(&mac_cfg->node, tmp_del_list);
9078 case HCLGE_MAC_TO_ADD:
9080 list_del(&mac_cfg->node);
9088 static void hclge_unsync_del_list(struct hclge_vport *vport,
9089 int (*unsync)(struct hclge_vport *vport,
9090 const unsigned char *addr),
9092 struct list_head *tmp_del_list)
9094 struct hclge_mac_node *mac_cfg, *tmp;
9097 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9098 ret = unsync(vport, mac_cfg->mac_addr);
9099 if (!ret || ret == -ENOENT) {
9100 /* clear all mac addr from hardware, but remain these
9101 * mac addr in the mac list, and restore them after
9102 * vf reset finished.
9105 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9106 mac_cfg->state = HCLGE_MAC_TO_ADD;
9108 list_del(&mac_cfg->node);
9111 } else if (is_del_list) {
9112 mac_cfg->state = HCLGE_MAC_TO_DEL;
9117 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9118 enum HCLGE_MAC_ADDR_TYPE mac_type)
9120 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9121 struct hclge_dev *hdev = vport->back;
9122 struct list_head tmp_del_list, *list;
9124 if (mac_type == HCLGE_MAC_ADDR_UC) {
9125 list = &vport->uc_mac_list;
9126 unsync = hclge_rm_uc_addr_common;
9128 list = &vport->mc_mac_list;
9129 unsync = hclge_rm_mc_addr_common;
9132 INIT_LIST_HEAD(&tmp_del_list);
9135 set_bit(vport->vport_id, hdev->vport_config_block);
9137 spin_lock_bh(&vport->mac_list_lock);
9139 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9141 spin_unlock_bh(&vport->mac_list_lock);
9143 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9145 spin_lock_bh(&vport->mac_list_lock);
9147 hclge_sync_from_del_list(&tmp_del_list, list);
9149 spin_unlock_bh(&vport->mac_list_lock);
9152 /* remove all mac address when uninitailize */
9153 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9154 enum HCLGE_MAC_ADDR_TYPE mac_type)
9156 struct hclge_mac_node *mac_node, *tmp;
9157 struct hclge_dev *hdev = vport->back;
9158 struct list_head tmp_del_list, *list;
9160 INIT_LIST_HEAD(&tmp_del_list);
9162 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9163 &vport->uc_mac_list : &vport->mc_mac_list;
9165 spin_lock_bh(&vport->mac_list_lock);
9167 list_for_each_entry_safe(mac_node, tmp, list, node) {
9168 switch (mac_node->state) {
9169 case HCLGE_MAC_TO_DEL:
9170 case HCLGE_MAC_ACTIVE:
9171 list_move_tail(&mac_node->node, &tmp_del_list);
9173 case HCLGE_MAC_TO_ADD:
9174 list_del(&mac_node->node);
9180 spin_unlock_bh(&vport->mac_list_lock);
9182 if (mac_type == HCLGE_MAC_ADDR_UC)
9183 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9184 hclge_rm_uc_addr_common);
9186 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9187 hclge_rm_mc_addr_common);
9189 if (!list_empty(&tmp_del_list))
9190 dev_warn(&hdev->pdev->dev,
9191 "uninit %s mac list for vport %u not completely.\n",
9192 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9195 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9196 list_del(&mac_node->node);
9201 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9203 struct hclge_vport *vport;
9206 for (i = 0; i < hdev->num_alloc_vport; i++) {
9207 vport = &hdev->vport[i];
9208 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9209 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9213 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9214 u16 cmdq_resp, u8 resp_code)
9216 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9217 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9218 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9219 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9224 dev_err(&hdev->pdev->dev,
9225 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9230 switch (resp_code) {
9231 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9232 case HCLGE_ETHERTYPE_ALREADY_ADD:
9235 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9236 dev_err(&hdev->pdev->dev,
9237 "add mac ethertype failed for manager table overflow.\n");
9238 return_status = -EIO;
9240 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9241 dev_err(&hdev->pdev->dev,
9242 "add mac ethertype failed for key conflict.\n");
9243 return_status = -EIO;
9246 dev_err(&hdev->pdev->dev,
9247 "add mac ethertype failed for undefined, code=%u.\n",
9249 return_status = -EIO;
9252 return return_status;
9255 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9258 struct hclge_mac_vlan_tbl_entry_cmd req;
9259 struct hclge_dev *hdev = vport->back;
9260 struct hclge_desc desc;
9261 u16 egress_port = 0;
9264 if (is_zero_ether_addr(mac_addr))
9267 memset(&req, 0, sizeof(req));
9268 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9269 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9270 req.egress_port = cpu_to_le16(egress_port);
9271 hclge_prepare_mac_addr(&req, mac_addr, false);
9273 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9276 vf_idx += HCLGE_VF_VPORT_START_NUM;
9277 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9279 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9285 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9288 struct hclge_vport *vport = hclge_get_vport(handle);
9289 struct hclge_dev *hdev = vport->back;
9291 vport = hclge_get_vf_vport(hdev, vf);
9295 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9296 dev_info(&hdev->pdev->dev,
9297 "Specified MAC(=%pM) is same as before, no change committed!\n",
9302 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9303 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9308 ether_addr_copy(vport->vf_info.mac, mac_addr);
9310 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9311 dev_info(&hdev->pdev->dev,
9312 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9314 return hclge_inform_reset_assert_to_vf(vport);
9317 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9322 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9323 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9325 struct hclge_desc desc;
9330 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9331 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9333 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9335 dev_err(&hdev->pdev->dev,
9336 "add mac ethertype failed for cmd_send, ret =%d.\n",
9341 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9342 retval = le16_to_cpu(desc.retval);
9344 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9347 static int init_mgr_tbl(struct hclge_dev *hdev)
9352 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9353 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9355 dev_err(&hdev->pdev->dev,
9356 "add mac ethertype failed, ret =%d.\n",
9365 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9367 struct hclge_vport *vport = hclge_get_vport(handle);
9368 struct hclge_dev *hdev = vport->back;
9370 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9373 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9374 const u8 *old_addr, const u8 *new_addr)
9376 struct list_head *list = &vport->uc_mac_list;
9377 struct hclge_mac_node *old_node, *new_node;
9379 new_node = hclge_find_mac_node(list, new_addr);
9381 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9385 new_node->state = HCLGE_MAC_TO_ADD;
9386 ether_addr_copy(new_node->mac_addr, new_addr);
9387 list_add(&new_node->node, list);
9389 if (new_node->state == HCLGE_MAC_TO_DEL)
9390 new_node->state = HCLGE_MAC_ACTIVE;
9392 /* make sure the new addr is in the list head, avoid dev
9393 * addr may be not re-added into mac table for the umv space
9394 * limitation after global/imp reset which will clear mac
9395 * table by hardware.
9397 list_move(&new_node->node, list);
9400 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9401 old_node = hclge_find_mac_node(list, old_addr);
9403 if (old_node->state == HCLGE_MAC_TO_ADD) {
9404 list_del(&old_node->node);
9407 old_node->state = HCLGE_MAC_TO_DEL;
9412 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9417 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9420 const unsigned char *new_addr = (const unsigned char *)p;
9421 struct hclge_vport *vport = hclge_get_vport(handle);
9422 struct hclge_dev *hdev = vport->back;
9423 unsigned char *old_addr = NULL;
9426 /* mac addr check */
9427 if (is_zero_ether_addr(new_addr) ||
9428 is_broadcast_ether_addr(new_addr) ||
9429 is_multicast_ether_addr(new_addr)) {
9430 dev_err(&hdev->pdev->dev,
9431 "change uc mac err! invalid mac: %pM.\n",
9436 ret = hclge_pause_addr_cfg(hdev, new_addr);
9438 dev_err(&hdev->pdev->dev,
9439 "failed to configure mac pause address, ret = %d\n",
9445 old_addr = hdev->hw.mac.mac_addr;
9447 spin_lock_bh(&vport->mac_list_lock);
9448 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9450 dev_err(&hdev->pdev->dev,
9451 "failed to change the mac addr:%pM, ret = %d\n",
9453 spin_unlock_bh(&vport->mac_list_lock);
9456 hclge_pause_addr_cfg(hdev, old_addr);
9460 /* we must update dev addr with spin lock protect, preventing dev addr
9461 * being removed by set_rx_mode path.
9463 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9464 spin_unlock_bh(&vport->mac_list_lock);
9466 hclge_task_schedule(hdev, 0);
9471 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9473 struct mii_ioctl_data *data = if_mii(ifr);
9475 if (!hnae3_dev_phy_imp_supported(hdev))
9480 data->phy_id = hdev->hw.mac.phy_addr;
9481 /* this command reads phy id and register at the same time */
9484 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9488 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9494 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9497 struct hclge_vport *vport = hclge_get_vport(handle);
9498 struct hclge_dev *hdev = vport->back;
9502 return hclge_ptp_get_cfg(hdev, ifr);
9504 return hclge_ptp_set_cfg(hdev, ifr);
9506 if (!hdev->hw.mac.phydev)
9507 return hclge_mii_ioctl(hdev, ifr, cmd);
9510 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9513 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9516 struct hclge_port_vlan_filter_bypass_cmd *req;
9517 struct hclge_desc desc;
9520 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9521 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9523 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9526 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9528 dev_err(&hdev->pdev->dev,
9529 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9535 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9536 u8 fe_type, bool filter_en, u8 vf_id)
9538 struct hclge_vlan_filter_ctrl_cmd *req;
9539 struct hclge_desc desc;
9542 /* read current vlan filter parameter */
9543 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9544 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9545 req->vlan_type = vlan_type;
9548 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9550 dev_err(&hdev->pdev->dev,
9551 "failed to get vlan filter config, ret = %d.\n", ret);
9555 /* modify and write new config parameter */
9556 hclge_cmd_reuse_desc(&desc, false);
9557 req->vlan_fe = filter_en ?
9558 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9560 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9562 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9568 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9570 struct hclge_dev *hdev = vport->back;
9571 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9574 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9575 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9576 HCLGE_FILTER_FE_EGRESS_V1_B,
9577 enable, vport->vport_id);
9579 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9580 HCLGE_FILTER_FE_EGRESS, enable,
9585 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9586 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9588 } else if (!vport->vport_id) {
9589 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9592 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9593 HCLGE_FILTER_FE_INGRESS,
9600 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9602 struct hnae3_handle *handle = &vport->nic;
9603 struct hclge_vport_vlan_cfg *vlan, *tmp;
9604 struct hclge_dev *hdev = vport->back;
9606 if (vport->vport_id) {
9607 if (vport->port_base_vlan_cfg.state !=
9608 HNAE3_PORT_BASE_VLAN_DISABLE)
9611 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9613 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9617 if (!vport->req_vlan_fltr_en)
9620 /* compatible with former device, always enable vlan filter */
9621 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9624 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9625 if (vlan->vlan_id != 0)
9631 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9633 struct hclge_dev *hdev = vport->back;
9637 mutex_lock(&hdev->vport_lock);
9639 vport->req_vlan_fltr_en = request_en;
9641 need_en = hclge_need_enable_vport_vlan_filter(vport);
9642 if (need_en == vport->cur_vlan_fltr_en) {
9643 mutex_unlock(&hdev->vport_lock);
9647 ret = hclge_set_vport_vlan_filter(vport, need_en);
9649 mutex_unlock(&hdev->vport_lock);
9653 vport->cur_vlan_fltr_en = need_en;
9655 mutex_unlock(&hdev->vport_lock);
9660 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9662 struct hclge_vport *vport = hclge_get_vport(handle);
9664 return hclge_enable_vport_vlan_filter(vport, enable);
9667 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9668 bool is_kill, u16 vlan,
9669 struct hclge_desc *desc)
9671 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9672 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9677 hclge_cmd_setup_basic_desc(&desc[0],
9678 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9679 hclge_cmd_setup_basic_desc(&desc[1],
9680 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9682 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9684 vf_byte_off = vfid / 8;
9685 vf_byte_val = 1 << (vfid % 8);
9687 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9688 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9690 req0->vlan_id = cpu_to_le16(vlan);
9691 req0->vlan_cfg = is_kill;
9693 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9694 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9696 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9698 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9700 dev_err(&hdev->pdev->dev,
9701 "Send vf vlan command fail, ret =%d.\n",
9709 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9710 bool is_kill, struct hclge_desc *desc)
9712 struct hclge_vlan_filter_vf_cfg_cmd *req;
9714 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9717 #define HCLGE_VF_VLAN_NO_ENTRY 2
9718 if (!req->resp_code || req->resp_code == 1)
9721 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9722 set_bit(vfid, hdev->vf_vlan_full);
9723 dev_warn(&hdev->pdev->dev,
9724 "vf vlan table is full, vf vlan filter is disabled\n");
9728 dev_err(&hdev->pdev->dev,
9729 "Add vf vlan filter fail, ret =%u.\n",
9732 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9733 if (!req->resp_code)
9736 /* vf vlan filter is disabled when vf vlan table is full,
9737 * then new vlan id will not be added into vf vlan table.
9738 * Just return 0 without warning, avoid massive verbose
9739 * print logs when unload.
9741 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9744 dev_err(&hdev->pdev->dev,
9745 "Kill vf vlan filter fail, ret =%u.\n",
9752 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9753 bool is_kill, u16 vlan)
9755 struct hclge_vport *vport = &hdev->vport[vfid];
9756 struct hclge_desc desc[2];
9759 /* if vf vlan table is full, firmware will close vf vlan filter, it
9760 * is unable and unnecessary to add new vlan id to vf vlan filter.
9761 * If spoof check is enable, and vf vlan is full, it shouldn't add
9762 * new vlan, because tx packets with these vlan id will be dropped.
9764 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9765 if (vport->vf_info.spoofchk && vlan) {
9766 dev_err(&hdev->pdev->dev,
9767 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9773 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9777 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9780 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9781 u16 vlan_id, bool is_kill)
9783 struct hclge_vlan_filter_pf_cfg_cmd *req;
9784 struct hclge_desc desc;
9785 u8 vlan_offset_byte_val;
9786 u8 vlan_offset_byte;
9790 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9792 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9793 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9794 HCLGE_VLAN_BYTE_SIZE;
9795 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9797 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9798 req->vlan_offset = vlan_offset_160;
9799 req->vlan_cfg = is_kill;
9800 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9802 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9804 dev_err(&hdev->pdev->dev,
9805 "port vlan command, send fail, ret =%d.\n", ret);
9809 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9810 u16 vport_id, u16 vlan_id,
9813 u16 vport_idx, vport_num = 0;
9816 if (is_kill && !vlan_id)
9819 if (vlan_id >= VLAN_N_VID)
9822 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9824 dev_err(&hdev->pdev->dev,
9825 "Set %u vport vlan filter config fail, ret =%d.\n",
9830 /* vlan 0 may be added twice when 8021q module is enabled */
9831 if (!is_kill && !vlan_id &&
9832 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9835 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9836 dev_err(&hdev->pdev->dev,
9837 "Add port vlan failed, vport %u is already in vlan %u\n",
9843 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9844 dev_err(&hdev->pdev->dev,
9845 "Delete port vlan failed, vport %u is not in vlan %u\n",
9850 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9853 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9854 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9860 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9862 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9863 struct hclge_vport_vtag_tx_cfg_cmd *req;
9864 struct hclge_dev *hdev = vport->back;
9865 struct hclge_desc desc;
9869 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9871 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9872 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9873 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9874 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9875 vcfg->accept_tag1 ? 1 : 0);
9876 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9877 vcfg->accept_untag1 ? 1 : 0);
9878 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9879 vcfg->accept_tag2 ? 1 : 0);
9880 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9881 vcfg->accept_untag2 ? 1 : 0);
9882 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9883 vcfg->insert_tag1_en ? 1 : 0);
9884 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9885 vcfg->insert_tag2_en ? 1 : 0);
9886 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9887 vcfg->tag_shift_mode_en ? 1 : 0);
9888 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9890 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9891 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9892 HCLGE_VF_NUM_PER_BYTE;
9893 req->vf_bitmap[bmap_index] =
9894 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9896 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9898 dev_err(&hdev->pdev->dev,
9899 "Send port txvlan cfg command fail, ret =%d\n",
9905 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9907 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9908 struct hclge_vport_vtag_rx_cfg_cmd *req;
9909 struct hclge_dev *hdev = vport->back;
9910 struct hclge_desc desc;
9914 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9916 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9917 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9918 vcfg->strip_tag1_en ? 1 : 0);
9919 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9920 vcfg->strip_tag2_en ? 1 : 0);
9921 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9922 vcfg->vlan1_vlan_prionly ? 1 : 0);
9923 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9924 vcfg->vlan2_vlan_prionly ? 1 : 0);
9925 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9926 vcfg->strip_tag1_discard_en ? 1 : 0);
9927 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9928 vcfg->strip_tag2_discard_en ? 1 : 0);
9930 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9931 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9932 HCLGE_VF_NUM_PER_BYTE;
9933 req->vf_bitmap[bmap_index] =
9934 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9936 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9938 dev_err(&hdev->pdev->dev,
9939 "Send port rxvlan cfg command fail, ret =%d\n",
9945 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9946 u16 port_base_vlan_state,
9947 u16 vlan_tag, u8 qos)
9951 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9952 vport->txvlan_cfg.accept_tag1 = true;
9953 vport->txvlan_cfg.insert_tag1_en = false;
9954 vport->txvlan_cfg.default_tag1 = 0;
9956 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9958 vport->txvlan_cfg.accept_tag1 =
9959 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9960 vport->txvlan_cfg.insert_tag1_en = true;
9961 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9965 vport->txvlan_cfg.accept_untag1 = true;
9967 /* accept_tag2 and accept_untag2 are not supported on
9968 * pdev revision(0x20), new revision support them,
9969 * this two fields can not be configured by user.
9971 vport->txvlan_cfg.accept_tag2 = true;
9972 vport->txvlan_cfg.accept_untag2 = true;
9973 vport->txvlan_cfg.insert_tag2_en = false;
9974 vport->txvlan_cfg.default_tag2 = 0;
9975 vport->txvlan_cfg.tag_shift_mode_en = true;
9977 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9978 vport->rxvlan_cfg.strip_tag1_en = false;
9979 vport->rxvlan_cfg.strip_tag2_en =
9980 vport->rxvlan_cfg.rx_vlan_offload_en;
9981 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9983 vport->rxvlan_cfg.strip_tag1_en =
9984 vport->rxvlan_cfg.rx_vlan_offload_en;
9985 vport->rxvlan_cfg.strip_tag2_en = true;
9986 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9989 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9990 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9991 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9993 ret = hclge_set_vlan_tx_offload_cfg(vport);
9997 return hclge_set_vlan_rx_offload_cfg(vport);
10000 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
10002 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
10003 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
10004 struct hclge_desc desc;
10007 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
10008 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
10009 rx_req->ot_fst_vlan_type =
10010 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
10011 rx_req->ot_sec_vlan_type =
10012 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
10013 rx_req->in_fst_vlan_type =
10014 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
10015 rx_req->in_sec_vlan_type =
10016 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
10018 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10020 dev_err(&hdev->pdev->dev,
10021 "Send rxvlan protocol type command fail, ret =%d\n",
10026 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10028 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10029 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10030 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10032 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10034 dev_err(&hdev->pdev->dev,
10035 "Send txvlan protocol type command fail, ret =%d\n",
10041 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10043 #define HCLGE_DEF_VLAN_TYPE 0x8100
10045 struct hnae3_handle *handle = &hdev->vport[0].nic;
10046 struct hclge_vport *vport;
10050 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10051 /* for revision 0x21, vf vlan filter is per function */
10052 for (i = 0; i < hdev->num_alloc_vport; i++) {
10053 vport = &hdev->vport[i];
10054 ret = hclge_set_vlan_filter_ctrl(hdev,
10055 HCLGE_FILTER_TYPE_VF,
10056 HCLGE_FILTER_FE_EGRESS,
10061 vport->cur_vlan_fltr_en = true;
10064 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10065 HCLGE_FILTER_FE_INGRESS, true,
10070 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10071 HCLGE_FILTER_FE_EGRESS_V1_B,
10077 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10078 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10079 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10080 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10081 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10082 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10084 ret = hclge_set_vlan_protocol_type(hdev);
10088 for (i = 0; i < hdev->num_alloc_vport; i++) {
10092 vport = &hdev->vport[i];
10093 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10094 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10096 ret = hclge_vlan_offload_cfg(vport,
10097 vport->port_base_vlan_cfg.state,
10103 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
10106 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10107 bool writen_to_tbl)
10109 struct hclge_vport_vlan_cfg *vlan, *tmp;
10111 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10112 if (vlan->vlan_id == vlan_id)
10115 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10119 vlan->hd_tbl_status = writen_to_tbl;
10120 vlan->vlan_id = vlan_id;
10122 list_add_tail(&vlan->node, &vport->vlan_list);
10125 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10127 struct hclge_vport_vlan_cfg *vlan, *tmp;
10128 struct hclge_dev *hdev = vport->back;
10131 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10132 if (!vlan->hd_tbl_status) {
10133 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10135 vlan->vlan_id, false);
10137 dev_err(&hdev->pdev->dev,
10138 "restore vport vlan list failed, ret=%d\n",
10143 vlan->hd_tbl_status = true;
10149 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10152 struct hclge_vport_vlan_cfg *vlan, *tmp;
10153 struct hclge_dev *hdev = vport->back;
10155 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10156 if (vlan->vlan_id == vlan_id) {
10157 if (is_write_tbl && vlan->hd_tbl_status)
10158 hclge_set_vlan_filter_hw(hdev,
10159 htons(ETH_P_8021Q),
10164 list_del(&vlan->node);
10171 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10173 struct hclge_vport_vlan_cfg *vlan, *tmp;
10174 struct hclge_dev *hdev = vport->back;
10176 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10177 if (vlan->hd_tbl_status)
10178 hclge_set_vlan_filter_hw(hdev,
10179 htons(ETH_P_8021Q),
10184 vlan->hd_tbl_status = false;
10186 list_del(&vlan->node);
10190 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10193 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10195 struct hclge_vport_vlan_cfg *vlan, *tmp;
10196 struct hclge_vport *vport;
10199 for (i = 0; i < hdev->num_alloc_vport; i++) {
10200 vport = &hdev->vport[i];
10201 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10202 list_del(&vlan->node);
10208 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10210 struct hclge_vport_vlan_cfg *vlan, *tmp;
10211 struct hclge_dev *hdev = vport->back;
10217 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10218 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10219 state = vport->port_base_vlan_cfg.state;
10221 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10222 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10223 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10224 vport->vport_id, vlan_id,
10229 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10230 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10232 vlan->vlan_id, false);
10235 vlan->hd_tbl_status = true;
10239 /* For global reset and imp reset, hardware will clear the mac table,
10240 * so we change the mac address state from ACTIVE to TO_ADD, then they
10241 * can be restored in the service task after reset complete. Furtherly,
10242 * the mac addresses with state TO_DEL or DEL_FAIL are unnecessary to
10243 * be restored after reset, so just remove these mac nodes from mac_list.
10245 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10247 struct hclge_mac_node *mac_node, *tmp;
10249 list_for_each_entry_safe(mac_node, tmp, list, node) {
10250 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10251 mac_node->state = HCLGE_MAC_TO_ADD;
10252 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10253 list_del(&mac_node->node);
10259 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10261 spin_lock_bh(&vport->mac_list_lock);
10263 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10264 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10265 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10267 spin_unlock_bh(&vport->mac_list_lock);
10270 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10272 struct hclge_vport *vport = &hdev->vport[0];
10273 struct hnae3_handle *handle = &vport->nic;
10275 hclge_restore_mac_table_common(vport);
10276 hclge_restore_vport_vlan_table(vport);
10277 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10278 hclge_restore_fd_entries(handle);
10281 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10283 struct hclge_vport *vport = hclge_get_vport(handle);
10285 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10286 vport->rxvlan_cfg.strip_tag1_en = false;
10287 vport->rxvlan_cfg.strip_tag2_en = enable;
10288 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10290 vport->rxvlan_cfg.strip_tag1_en = enable;
10291 vport->rxvlan_cfg.strip_tag2_en = true;
10292 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10295 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10296 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10297 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10298 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10300 return hclge_set_vlan_rx_offload_cfg(vport);
10303 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10305 struct hclge_dev *hdev = vport->back;
10307 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10308 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10311 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10312 u16 port_base_vlan_state,
10313 struct hclge_vlan_info *new_info,
10314 struct hclge_vlan_info *old_info)
10316 struct hclge_dev *hdev = vport->back;
10319 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10320 hclge_rm_vport_all_vlan_table(vport, false);
10321 /* force clear VLAN 0 */
10322 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10325 return hclge_set_vlan_filter_hw(hdev,
10326 htons(new_info->vlan_proto),
10328 new_info->vlan_tag,
10332 /* force add VLAN 0 */
10333 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10337 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10338 vport->vport_id, old_info->vlan_tag,
10343 return hclge_add_vport_all_vlan_table(vport);
10346 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10347 const struct hclge_vlan_info *old_cfg)
10349 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10352 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10358 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10359 struct hclge_vlan_info *vlan_info)
10361 struct hnae3_handle *nic = &vport->nic;
10362 struct hclge_vlan_info *old_vlan_info;
10363 struct hclge_dev *hdev = vport->back;
10366 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10368 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10373 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10376 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10377 /* add new VLAN tag */
10378 ret = hclge_set_vlan_filter_hw(hdev,
10379 htons(vlan_info->vlan_proto),
10381 vlan_info->vlan_tag,
10386 /* remove old VLAN tag */
10387 if (old_vlan_info->vlan_tag == 0)
10388 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10391 ret = hclge_set_vlan_filter_hw(hdev,
10392 htons(ETH_P_8021Q),
10394 old_vlan_info->vlan_tag,
10397 dev_err(&hdev->pdev->dev,
10398 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10399 vport->vport_id, old_vlan_info->vlan_tag, ret);
10406 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10412 vport->port_base_vlan_cfg.state = state;
10413 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10414 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10416 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10418 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10419 hclge_set_vport_vlan_fltr_change(vport);
10424 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10425 enum hnae3_port_base_vlan_state state,
10428 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10430 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10432 return HNAE3_PORT_BASE_VLAN_ENABLE;
10436 return HNAE3_PORT_BASE_VLAN_DISABLE;
10438 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10439 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10440 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10442 return HNAE3_PORT_BASE_VLAN_MODIFY;
10445 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10446 u16 vlan, u8 qos, __be16 proto)
10448 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10449 struct hclge_vport *vport = hclge_get_vport(handle);
10450 struct hclge_dev *hdev = vport->back;
10451 struct hclge_vlan_info vlan_info;
10455 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10456 return -EOPNOTSUPP;
10458 vport = hclge_get_vf_vport(hdev, vfid);
10462 /* qos is a 3 bits value, so can not be bigger than 7 */
10463 if (vlan > VLAN_N_VID - 1 || qos > 7)
10465 if (proto != htons(ETH_P_8021Q))
10466 return -EPROTONOSUPPORT;
10468 state = hclge_get_port_base_vlan_state(vport,
10469 vport->port_base_vlan_cfg.state,
10471 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10474 vlan_info.vlan_tag = vlan;
10475 vlan_info.qos = qos;
10476 vlan_info.vlan_proto = ntohs(proto);
10478 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10480 dev_err(&hdev->pdev->dev,
10481 "failed to update port base vlan for vf %d, ret = %d\n",
10486 /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10489 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10490 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10491 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10492 vport->vport_id, state,
10498 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10500 struct hclge_vlan_info *vlan_info;
10501 struct hclge_vport *vport;
10505 /* clear port base vlan for all vf */
10506 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10507 vport = &hdev->vport[vf];
10508 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10510 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10512 vlan_info->vlan_tag, true);
10514 dev_err(&hdev->pdev->dev,
10515 "failed to clear vf vlan for vf%d, ret = %d\n",
10516 vf - HCLGE_VF_VPORT_START_NUM, ret);
10520 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10521 u16 vlan_id, bool is_kill)
10523 struct hclge_vport *vport = hclge_get_vport(handle);
10524 struct hclge_dev *hdev = vport->back;
10525 bool writen_to_tbl = false;
10528 /* When device is resetting or reset failed, firmware is unable to
10529 * handle mailbox. Just record the vlan id, and remove it after
10532 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10533 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10534 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10538 /* when port base vlan enabled, we use port base vlan as the vlan
10539 * filter entry. In this case, we don't update vlan filter table
10540 * when user add new vlan or remove exist vlan, just update the vport
10541 * vlan list. The vlan id in vlan list will be writen in vlan filter
10542 * table until port base vlan disabled
10544 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10545 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10547 writen_to_tbl = true;
10552 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10554 hclge_add_vport_vlan_table(vport, vlan_id,
10556 } else if (is_kill) {
10557 /* when remove hw vlan filter failed, record the vlan id,
10558 * and try to remove it from hw later, to be consistence
10561 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10564 hclge_set_vport_vlan_fltr_change(vport);
10569 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10571 struct hclge_vport *vport;
10575 for (i = 0; i < hdev->num_alloc_vport; i++) {
10576 vport = &hdev->vport[i];
10577 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10581 ret = hclge_enable_vport_vlan_filter(vport,
10582 vport->req_vlan_fltr_en);
10584 dev_err(&hdev->pdev->dev,
10585 "failed to sync vlan filter state for vport%u, ret = %d\n",
10586 vport->vport_id, ret);
10587 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10594 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10596 #define HCLGE_MAX_SYNC_COUNT 60
10598 int i, ret, sync_cnt = 0;
10601 /* start from vport 1 for PF is always alive */
10602 for (i = 0; i < hdev->num_alloc_vport; i++) {
10603 struct hclge_vport *vport = &hdev->vport[i];
10605 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10607 while (vlan_id != VLAN_N_VID) {
10608 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10609 vport->vport_id, vlan_id,
10611 if (ret && ret != -EINVAL)
10614 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10615 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10616 hclge_set_vport_vlan_fltr_change(vport);
10619 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10622 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10627 hclge_sync_vlan_fltr_state(hdev);
10630 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10632 struct hclge_config_max_frm_size_cmd *req;
10633 struct hclge_desc desc;
10635 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10637 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10638 req->max_frm_size = cpu_to_le16(new_mps);
10639 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10641 return hclge_cmd_send(&hdev->hw, &desc, 1);
10644 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10646 struct hclge_vport *vport = hclge_get_vport(handle);
10648 return hclge_set_vport_mtu(vport, new_mtu);
10651 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10653 struct hclge_dev *hdev = vport->back;
10654 int i, max_frm_size, ret;
10656 /* HW supprt 2 layer vlan */
10657 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10658 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10659 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10662 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10663 mutex_lock(&hdev->vport_lock);
10664 /* VF's mps must fit within hdev->mps */
10665 if (vport->vport_id && max_frm_size > hdev->mps) {
10666 mutex_unlock(&hdev->vport_lock);
10668 } else if (vport->vport_id) {
10669 vport->mps = max_frm_size;
10670 mutex_unlock(&hdev->vport_lock);
10674 /* PF's mps must be greater then VF's mps */
10675 for (i = 1; i < hdev->num_alloc_vport; i++)
10676 if (max_frm_size < hdev->vport[i].mps) {
10677 mutex_unlock(&hdev->vport_lock);
10681 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10683 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10685 dev_err(&hdev->pdev->dev,
10686 "Change mtu fail, ret =%d\n", ret);
10690 hdev->mps = max_frm_size;
10691 vport->mps = max_frm_size;
10693 ret = hclge_buffer_alloc(hdev);
10695 dev_err(&hdev->pdev->dev,
10696 "Allocate buffer fail, ret =%d\n", ret);
10699 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10700 mutex_unlock(&hdev->vport_lock);
10704 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10707 struct hclge_reset_tqp_queue_cmd *req;
10708 struct hclge_desc desc;
10711 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10713 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10714 req->tqp_id = cpu_to_le16(queue_id);
10716 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10718 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10720 dev_err(&hdev->pdev->dev,
10721 "Send tqp reset cmd error, status =%d\n", ret);
10728 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10731 struct hclge_reset_tqp_queue_cmd *req;
10732 struct hclge_desc desc;
10735 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10737 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10738 req->tqp_id = cpu_to_le16(queue_id);
10740 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10742 dev_err(&hdev->pdev->dev,
10743 "Get reset status error, status =%d\n", ret);
10747 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10752 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10754 struct hnae3_queue *queue;
10755 struct hclge_tqp *tqp;
10757 queue = handle->kinfo.tqp[queue_id];
10758 tqp = container_of(queue, struct hclge_tqp, q);
10763 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10765 struct hclge_vport *vport = hclge_get_vport(handle);
10766 struct hclge_dev *hdev = vport->back;
10767 u16 reset_try_times = 0;
10773 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10774 queue_gid = hclge_covert_handle_qid_global(handle, i);
10775 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10777 dev_err(&hdev->pdev->dev,
10778 "failed to send reset tqp cmd, ret = %d\n",
10783 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10784 ret = hclge_get_reset_status(hdev, queue_gid,
10792 /* Wait for tqp hw reset */
10793 usleep_range(1000, 1200);
10796 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10797 dev_err(&hdev->pdev->dev,
10798 "wait for tqp hw reset timeout\n");
10802 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10804 dev_err(&hdev->pdev->dev,
10805 "failed to deassert soft reset, ret = %d\n",
10809 reset_try_times = 0;
10814 static int hclge_reset_rcb(struct hnae3_handle *handle)
10816 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10817 #define HCLGE_RESET_RCB_SUCCESS 1U
10819 struct hclge_vport *vport = hclge_get_vport(handle);
10820 struct hclge_dev *hdev = vport->back;
10821 struct hclge_reset_cmd *req;
10822 struct hclge_desc desc;
10827 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10829 req = (struct hclge_reset_cmd *)desc.data;
10830 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10831 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10832 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10833 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10835 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10837 dev_err(&hdev->pdev->dev,
10838 "failed to send rcb reset cmd, ret = %d\n", ret);
10842 return_status = req->fun_reset_rcb_return_status;
10843 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10846 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10847 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10852 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10853 * again to reset all tqps
10855 return hclge_reset_tqp_cmd(handle);
10858 int hclge_reset_tqp(struct hnae3_handle *handle)
10860 struct hclge_vport *vport = hclge_get_vport(handle);
10861 struct hclge_dev *hdev = vport->back;
10864 /* only need to disable PF's tqp */
10865 if (!vport->vport_id) {
10866 ret = hclge_tqp_enable(handle, false);
10868 dev_err(&hdev->pdev->dev,
10869 "failed to disable tqp, ret = %d\n", ret);
10874 return hclge_reset_rcb(handle);
10877 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10879 struct hclge_vport *vport = hclge_get_vport(handle);
10880 struct hclge_dev *hdev = vport->back;
10882 return hdev->fw_version;
10885 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10887 struct phy_device *phydev = hdev->hw.mac.phydev;
10892 phy_set_asym_pause(phydev, rx_en, tx_en);
10895 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10899 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10902 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10904 dev_err(&hdev->pdev->dev,
10905 "configure pauseparam error, ret = %d.\n", ret);
10910 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10912 struct phy_device *phydev = hdev->hw.mac.phydev;
10913 u16 remote_advertising = 0;
10914 u16 local_advertising;
10915 u32 rx_pause, tx_pause;
10918 if (!phydev->link || !phydev->autoneg)
10921 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10924 remote_advertising = LPA_PAUSE_CAP;
10926 if (phydev->asym_pause)
10927 remote_advertising |= LPA_PAUSE_ASYM;
10929 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10930 remote_advertising);
10931 tx_pause = flowctl & FLOW_CTRL_TX;
10932 rx_pause = flowctl & FLOW_CTRL_RX;
10934 if (phydev->duplex == HCLGE_MAC_HALF) {
10939 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10942 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10943 u32 *rx_en, u32 *tx_en)
10945 struct hclge_vport *vport = hclge_get_vport(handle);
10946 struct hclge_dev *hdev = vport->back;
10947 u8 media_type = hdev->hw.mac.media_type;
10949 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10950 hclge_get_autoneg(handle) : 0;
10952 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10958 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10961 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10964 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10973 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10974 u32 rx_en, u32 tx_en)
10976 if (rx_en && tx_en)
10977 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10978 else if (rx_en && !tx_en)
10979 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10980 else if (!rx_en && tx_en)
10981 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10983 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10985 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When nic is down, the service task is not running, doesn't update
	 * the port information per second. Query the port information before
	 * returning the media type, to ensure getting the correct media
	 * information.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}
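
/* Read the MDI/MDI-X control and status from the PHY. The relevant bits
 * live in a separate PHY register page, so the page register is switched
 * to the MDIX page for the reads and restored to the copper page after.
 */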
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}
static void hclge_info_show(struct hclge_dev *hdev)
{
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
	dev_info(dev, "Default tx spare buffer size: %u\n",
		 hdev->tx_spare_buf_size);

	dev_info(dev, "PF info end.\n");
}
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}
static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hnae3_client *client;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		vport->nic.client = client;
		ret = hclge_init_nic_client_instance(ae_dev, vport);
		if (ret)
			goto clear_nic;

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			vport->roce.client = client;
		}

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}
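
/* Tear down client instances in the reverse order of initialization: the
 * RoCE client is always removed first (it depends on the NIC client), and
 * any in-flight reset is allowed to finish before an instance is removed.
 */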
static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];

	if (hdev->roce_client) {
		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
		hdev->roce_client = NULL;
		vport->roce.client = NULL;
	}
	if (client->type == HNAE3_CLIENT_ROCE)
		return;
	if (hdev->nic_client && client->ops->uninit_instance) {
		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		client->ops->uninit_instance(&vport->nic, 0);
		hdev->nic_client = NULL;
		vport->nic.client = NULL;
	}
}
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
#define HCLGE_MEM_BAR		4

	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw = &hdev->hw;

	/* for device that does not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
		return 0;

	hw->mem_base = devm_ioremap_wc(&pdev->dev,
				       pci_resource_start(pdev, HCLGE_MEM_BAR),
				       pci_resource_len(pdev, HCLGE_MEM_BAR));
	if (!hw->mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}
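
/* Map PCI resources: BAR2 holds the configuration register space (io_base)
 * and BAR4, when present, exposes device memory mapped write-combined by
 * hclge_dev_mem_map() above. A 64-bit DMA mask is preferred, with a 32-bit
 * fallback for platforms that cannot provide it.
 */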
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	ret = hclge_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_unmap_io_base:
	pcim_iounmap(pdev, hdev->hw.io_base);
err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}
static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.mem_base);

	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_clear_master(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}
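
/* Prepare the device for an externally driven reset (for example FLR). If
 * the hardware is not ready, the prepare step is retried a bounded number
 * of times, releasing reset_sem between attempts so a pending reset task
 * can make progress.
 */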
static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
#define HCLGE_RESET_RETRY_WAIT_MS	500
#define HCLGE_RESET_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

retry:
	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	hdev->reset_type = rst_type;
	ret = hclge_reset_prepare(hdev);
	if (ret || hdev->reset_pending) {
		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
			ret);
		if (hdev->reset_pending ||
		    retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
			dev_err(&hdev->pdev->dev,
				"reset_pending:0x%lx, retry_cnt:%d\n",
				hdev->reset_pending, retry_cnt);
			clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
			up(&hdev->reset_sem);
			msleep(HCLGE_RESET_RETRY_WAIT_MS);
			goto retry;
		}
	}

	/* disable misc vector before reset done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}
static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear vport's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vport(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}
static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware, it will
	 * fail with older firmware. Error value -EOPNOTSUPP can only be
	 * returned by older firmware running this command, so to keep the
	 * code backward compatible we will override this value and return
	 * success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}
	return 0;
}
static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2 layers of vlan */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	ret = hclge_devlink_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command queue initialize */
	ret = hclge_cmd_queue_init(hdev);
	if (ret)
		goto err_devlink_uninit;

	/* Firmware command initialize */
	ret = hclge_cmd_init(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_clear_hw_resource(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
	    !hnae3_dev_phy_imp_supported(hdev)) {
		ret = hclge_mac_mdio_config(hdev);
		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_rss_init_cfg(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	/* Setup affinity after service timer setup because add_timer_on
	 * is called in affinity notify.
	 */
	hclge_misc_affinity_setup(hdev);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* request delayed reset for the error recovery because an immediate
	 * global reset on a PF affects pending initialization of other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	hclge_init_rxd_adv_layout(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));

	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_cmd_uninit(hdev);
err_devlink_uninit:
	hclge_devlink_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	mutex_destroy(&hdev->vport_lock);
	return ret;
}
static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}
static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}
static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}
static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	vport->vf_info.trusted = new_trusted;
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);

	return 0;
}
static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}
static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}
static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}
static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate, after reset, firmware already set it to
		 * max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_vport_stop(vport);
		vport++;
	}
}
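
/* Rebuild the hardware state after a reset. Unlike first-time init, tables
 * kept in driver memory (MAC/VLAN/FD) are not rebuilt here, except for the
 * reset types that wipe them in hardware (IMP/global reset), which are
 * handled at the top of the function.
 */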
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: pf reset needn't to clear or restore pf and vf table entry.
	 * so here should not clean table in memory.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_cmd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		return ret;

	/* Log and clear the hw errors that already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	hclge_init_rxd_adv_layout(hdev);
	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_misc_affinity_teardown(hdev);
	hclge_state_uninit(hdev);
	hclge_ptp_uninit(hdev);
	hclge_uninit_rxd_adv_layout(hdev);
	hclge_uninit_mac_table(hdev);
	hclge_del_all_fd_entries(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_cmd_uninit(hdev);
	hclge_misc_irq_uninit(hdev);
	hclge_devlink_uninit(hdev);
	hclge_pci_uninit(hdev);
	mutex_destroy(&hdev->vport_lock);
	hclge_uninit_vport_vlan_table(hdev);
	ae_dev->priv = NULL;
}
static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}
static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	roundup_size = roundup_pow_of_two(kinfo->rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = kinfo->rss_size * i;
	}
	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitializes the rss indirect table according to the new RSS size */
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}
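
/* Register dump helpers for ethtool -d. The dump is laid out in lines of
 * REG_NUM_PER_LINE u32 words; each block is padded with SEPARATOR_VALUE
 * words up to a line boundary so user space can detect block boundaries.
 */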
static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
			      u32 *regs_num_64_bit)
{
	struct hclge_desc desc;
	u32 total_num;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query register number cmd failed, ret = %d.\n", ret);
		return ret;
	}

	*regs_num_32_bit = le32_to_cpu(desc.data[0]);
	*regs_num_64_bit = le32_to_cpu(desc.data[1]);

	total_num = *regs_num_32_bit + *regs_num_64_bit;
	if (!total_num)
		return -EINVAL;

	return 0;
}
static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_32_BIT_REG_RTN_DATANUM 8
#define HCLGE_32_BIT_DESC_NODATA_LEN 2

	struct hclge_desc *desc;
	u32 *reg_val = data;
	__le32 *desc_data;
	int nodata_num;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
			       HCLGE_32_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 32 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le32 *)(&desc[i].data[0]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
		} else {
			desc_data = (__le32 *)(&desc[i]);
			n = HCLGE_32_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le32_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
				 void *data)
{
#define HCLGE_64_BIT_REG_RTN_DATANUM 4
#define HCLGE_64_BIT_DESC_NODATA_LEN 1

	struct hclge_desc *desc;
	u64 *reg_val = data;
	__le64 *desc_data;
	int nodata_len;
	int cmd_num;
	int i, k, n;
	int ret;

	if (regs_num == 0)
		return 0;

	nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
	cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
			       HCLGE_64_BIT_REG_RTN_DATANUM);
	desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
	ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query 64 bit register cmd failed, ret = %d.\n", ret);
		kfree(desc);
		return ret;
	}

	for (i = 0; i < cmd_num; i++) {
		if (i == 0) {
			desc_data = (__le64 *)(&desc[i].data[0]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
		} else {
			desc_data = (__le64 *)(&desc[i]);
			n = HCLGE_64_BIT_REG_RTN_DATANUM;
		}
		for (k = 0; k < n; k++) {
			*reg_val++ = le64_to_cpu(*desc_data++);

			regs_num--;
			if (!regs_num)
				break;
		}
	}

	kfree(desc);
	return 0;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFDFCFBFA
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
#define REG_SEPARATOR_LINE	1
#define REG_NUM_REMAIN_MASK	3
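
/* DFX (debug) registers are queried in two steps: first the BD counts for
 * every register type are read (hclge_get_dfx_reg_bd_num() below), then
 * each type is fetched with a multi-BD command (hclge_get_dfx_reg()).
 */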
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
{
	int i;

	/* initialize command BD except the last one */
	for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* initialize the last command BD */
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
}
static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
				    int *bd_num_list,
				    u32 type_num)
{
	u32 entries_per_desc, desc_index, index, offset, i;
	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx bd num fail, status is %d.\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	for (i = 0; i < type_num; i++) {
		offset = hclge_dfx_bd_offset_list[i];
		index = offset % entries_per_desc;
		desc_index = offset / entries_per_desc;
		bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
	}

	return ret;
}
static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
				  struct hclge_desc *desc_src, int bd_num,
				  enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int i, ret;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	for (i = 0; i < bd_num - 1; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	desc = desc_src;
	ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Query dfx reg cmd(0x%x) send fail, status is %d.\n",
			cmd, ret);

	return ret;
}
static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
				    void *data)
{
	int entries_per_desc, reg_num, separator_num, desc_index, index, i;
	struct hclge_desc *desc = desc_src;
	u32 *reg = data;

	entries_per_desc = ARRAY_SIZE(desc->data);
	reg_num = entries_per_desc * bd_num;
	separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++) {
		index = i % entries_per_desc;
		desc_index = i / entries_per_desc;
		*reg++ = le32_to_cpu(desc[desc_index].data[index]);
	}
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	return reg_num + separator_num;
}
static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int data_len_per_desc, bd_num, i;
	int *bd_num_list;
	u32 data_len;
	int ret;

	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
	if (!bd_num_list)
		return -ENOMEM;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		goto out;
	}

	data_len_per_desc = sizeof_field(struct hclge_desc, data);
	*len = 0;
	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		data_len = data_len_per_desc * bd_num;
		*len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
	}

out:
	kfree(bd_num_list);
	return ret;
}
static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
{
	u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
	int bd_num, bd_num_max, buf_len, i;
	struct hclge_desc *desc_src;
	int *bd_num_list;
	u32 *reg = data;
	int ret;

	bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
	if (!bd_num_list)
		return -ENOMEM;

	ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg bd num fail, status is %d.\n", ret);
		goto out;
	}

	bd_num_max = bd_num_list[0];
	for (i = 1; i < dfx_reg_type_num; i++)
		bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);

	buf_len = sizeof(*desc_src) * bd_num_max;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < dfx_reg_type_num; i++) {
		bd_num = bd_num_list[i];
		ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
					     hclge_dfx_reg_opcode_list[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Get dfx reg fail, status is %d.\n", ret);
			break;
		}

		reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
	}

	kfree(desc_src);
out:
	kfree(bd_num_list);
	return ret;
}
static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
			      struct hnae3_knic_private_info *kinfo)
{
#define HCLGE_RING_REG_OFFSET		0x200
#define HCLGE_RING_INT_REG_OFFSET	0x4

	int i, j, reg_num, separator_num;
	int data_num_sum;
	u32 *reg = data;

	/* fetching per-PF register values from PF PCIe register space */
	reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum = reg_num + separator_num;

	reg_num = ARRAY_SIZE(common_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < reg_num; i++)
		*reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;
	data_num_sum += reg_num + separator_num;

	reg_num = ARRAY_SIZE(ring_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < kinfo->num_tqps; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						ring_reg_addr_list[i] +
						HCLGE_RING_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;

	reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_num; i++)
			*reg++ = hclge_read_dev(&hdev->hw,
						tqp_intr_reg_addr_list[i] +
						HCLGE_RING_INT_REG_OFFSET * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
	data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);

	return data_num_sum;
}
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int regs_lines_32_bit, regs_lines_64_bit;
	u32 regs_num_32_bit, regs_num_64_bit;
	int dfx_regs_len;
	int ret;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get dfx reg len failed, ret = %d.\n", ret);
		return ret;
	}

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;
	regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
		REG_SEPARATOR_LINE;

	return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
		regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
			   void *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 regs_num_32_bit, regs_num_64_bit;
	int i, reg_num, separator_num, ret;
	u32 *reg = data;

	*version = hdev->fw_version;

	ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get register number failed, ret = %d.\n", ret);
		return;
	}

	reg += hclge_fetch_pf_reg(hdev, reg, kinfo);

	ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 32 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_32_bit;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get 64 bit register failed, ret = %d.\n", ret);
		return;
	}
	reg_num = regs_num_64_bit * 2;
	reg += reg_num;
	separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	ret = hclge_get_dfx_reg(hdev, reg);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get dfx register failed, ret = %d.\n", ret);
}
static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}
enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}
static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}
static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}
static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;
	u8 tmp_flags;
	int ret;
	u16 i;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret) {
			clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				  &vport->state);
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
		}
	}

	for (i = 1; i < hdev->num_alloc_vport; i++) {
		bool uc_en = false;
		bool mc_en = false;
		bool bc_en;

		vport = &hdev->vport[i];

		if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
					&vport->state))
			continue;

		if (vport->vf_info.trusted) {
			uc_en = vport->vf_info.request_uc_en > 0 ||
				vport->overflow_promisc_flags &
				HNAE3_OVERFLOW_UPE;
			mc_en = vport->vf_info.request_mc_en > 0 ||
				vport->overflow_promisc_flags &
				HNAE3_OVERFLOW_MPE;
		}
		bc_en = vport->vf_info.request_bc_en > 0;

		ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
						 mc_en, bc_en);
		if (ret) {
			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state);
			return;
		}
		hclge_set_vport_vlan_fltr_change(vport);
	}
}
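
/* SFP module EEPROM access: firmware reports whether a module is present,
 * and the EEPROM content is read in chunks of up to HCLGE_SFP_INFO_MAX_LEN
 * bytes spread across the 6 BDs of one query command, see below.
 */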
static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}
/* need 6 bds (total 140 bytes) in one reading
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}
static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}
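
/* ae_ops vtable exported to the hnae3 framework; this is where the hns3
 * netdev and client layers dispatch into the PF driver.
 */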
static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
};
static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};
static int hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);