// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_err.h"
#include "hclge_devlink.h"
28 #define HCLGE_NAME "hclge"
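/* HCLGE_STATS_READ() and HCLGE_MAC_STATS_FIELD_OFF() are helpers for
 * the g_mac_stats_string table below: each table entry pairs an ethtool
 * string with the offset of its counter inside struct hclge_mac_stats,
 * and hclge_comm_get_stats() reads the counter back with
 * HCLGE_STATS_READ(comm_stats, strs[i].offset).
 */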
29 #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
30 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
32 #define HCLGE_BUF_SIZE_UNIT 256U
33 #define HCLGE_BUF_MUL_BY 2
34 #define HCLGE_BUF_DIV_BY 2
35 #define NEED_RESERVE_TC_NUM 2
36 #define BUF_MAX_PERCENT 100
37 #define BUF_RESERVE_PERCENT 90
39 #define HCLGE_RESET_MAX_FAIL_CNT 5
40 #define HCLGE_RESET_SYNC_TIME 100
41 #define HCLGE_PF_RESET_SYNC_TIME 20
42 #define HCLGE_PF_RESET_SYNC_CNT 1500
44 /* Get DFX BD number offset */
45 #define HCLGE_DFX_BIOS_BD_OFFSET 1
46 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
47 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
48 #define HCLGE_DFX_IGU_BD_OFFSET 4
49 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
50 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
51 #define HCLGE_DFX_NCSI_BD_OFFSET 7
52 #define HCLGE_DFX_RTC_BD_OFFSET 8
53 #define HCLGE_DFX_PPP_BD_OFFSET 9
54 #define HCLGE_DFX_RCB_BD_OFFSET 10
55 #define HCLGE_DFX_TQP_BD_OFFSET 11
56 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
58 #define HCLGE_LINK_STATUS_MS 10
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
66 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
67 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
69 static int hclge_set_default_loopback(struct hclge_dev *hdev);
71 static void hclge_sync_mac_table(struct hclge_dev *hdev);
72 static void hclge_restore_hw_table(struct hclge_dev *hdev);
73 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
74 static void hclge_sync_fd_table(struct hclge_dev *hdev);
76 static struct hnae3_ae_algo ae_algo;
78 static struct workqueue_struct *hclge_wq;
80 static const struct pci_device_id ae_algo_pci_tbl[] = {
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
85 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
86 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
87 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
88 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
89 /* required last entry */
93 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
95 static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
96 HCLGE_NIC_CSQ_BASEADDR_H_REG,
97 HCLGE_NIC_CSQ_DEPTH_REG,
98 HCLGE_NIC_CSQ_TAIL_REG,
99 HCLGE_NIC_CSQ_HEAD_REG,
100 HCLGE_NIC_CRQ_BASEADDR_L_REG,
101 HCLGE_NIC_CRQ_BASEADDR_H_REG,
102 HCLGE_NIC_CRQ_DEPTH_REG,
103 HCLGE_NIC_CRQ_TAIL_REG,
104 HCLGE_NIC_CRQ_HEAD_REG,
105 HCLGE_VECTOR0_CMDQ_SRC_REG,
106 HCLGE_CMDQ_INTR_STS_REG,
107 HCLGE_CMDQ_INTR_EN_REG,
108 HCLGE_CMDQ_INTR_GEN_REG};
110 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
111 HCLGE_PF_OTHER_INT_REG,
112 HCLGE_MISC_RESET_STS_REG,
113 HCLGE_MISC_VECTOR_INT_STS,
114 HCLGE_GLOBAL_RESET_REG,
118 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
119 HCLGE_RING_RX_ADDR_H_REG,
120 HCLGE_RING_RX_BD_NUM_REG,
121 HCLGE_RING_RX_BD_LENGTH_REG,
122 HCLGE_RING_RX_MERGE_EN_REG,
123 HCLGE_RING_RX_TAIL_REG,
124 HCLGE_RING_RX_HEAD_REG,
125 HCLGE_RING_RX_FBD_NUM_REG,
126 HCLGE_RING_RX_OFFSET_REG,
127 HCLGE_RING_RX_FBD_OFFSET_REG,
128 HCLGE_RING_RX_STASH_REG,
129 HCLGE_RING_RX_BD_ERR_REG,
130 HCLGE_RING_TX_ADDR_L_REG,
131 HCLGE_RING_TX_ADDR_H_REG,
132 HCLGE_RING_TX_BD_NUM_REG,
133 HCLGE_RING_TX_PRIORITY_REG,
134 HCLGE_RING_TX_TC_REG,
135 HCLGE_RING_TX_MERGE_EN_REG,
136 HCLGE_RING_TX_TAIL_REG,
137 HCLGE_RING_TX_HEAD_REG,
138 HCLGE_RING_TX_FBD_NUM_REG,
139 HCLGE_RING_TX_OFFSET_REG,
140 HCLGE_RING_TX_EBD_NUM_REG,
141 HCLGE_RING_TX_EBD_OFFSET_REG,
142 HCLGE_RING_TX_BD_ERR_REG,
145 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
146 HCLGE_TQP_INTR_GL0_REG,
147 HCLGE_TQP_INTR_GL1_REG,
148 HCLGE_TQP_INTR_GL2_REG,
149 HCLGE_TQP_INTR_RL_REG};
151 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
153 "Serdes serial Loopback test",
154 "Serdes parallel Loopback test",
158 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
159 {"mac_tx_mac_pause_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
161 {"mac_rx_mac_pause_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
163 {"mac_tx_control_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
165 {"mac_rx_control_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
167 {"mac_tx_pfc_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
169 {"mac_tx_pfc_pri0_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
171 {"mac_tx_pfc_pri1_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
173 {"mac_tx_pfc_pri2_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
175 {"mac_tx_pfc_pri3_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
177 {"mac_tx_pfc_pri4_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
179 {"mac_tx_pfc_pri5_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
181 {"mac_tx_pfc_pri6_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
183 {"mac_tx_pfc_pri7_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
185 {"mac_rx_pfc_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
187 {"mac_rx_pfc_pri0_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
189 {"mac_rx_pfc_pri1_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
191 {"mac_rx_pfc_pri2_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
193 {"mac_rx_pfc_pri3_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
195 {"mac_rx_pfc_pri4_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
197 {"mac_rx_pfc_pri5_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
199 {"mac_rx_pfc_pri6_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
201 {"mac_rx_pfc_pri7_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
203 {"mac_tx_total_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
205 {"mac_tx_total_oct_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
207 {"mac_tx_good_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
209 {"mac_tx_bad_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
211 {"mac_tx_good_oct_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
213 {"mac_tx_bad_oct_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
215 {"mac_tx_uni_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
217 {"mac_tx_multi_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
219 {"mac_tx_broad_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
221 {"mac_tx_undersize_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
223 {"mac_tx_oversize_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
225 {"mac_tx_64_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
227 {"mac_tx_65_127_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
229 {"mac_tx_128_255_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
231 {"mac_tx_256_511_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
233 {"mac_tx_512_1023_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
235 {"mac_tx_1024_1518_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
237 {"mac_tx_1519_2047_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
239 {"mac_tx_2048_4095_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
241 {"mac_tx_4096_8191_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
243 {"mac_tx_8192_9216_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
245 {"mac_tx_9217_12287_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
247 {"mac_tx_12288_16383_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
249 {"mac_tx_1519_max_good_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
251 {"mac_tx_1519_max_bad_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
253 {"mac_rx_total_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
255 {"mac_rx_total_oct_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
257 {"mac_rx_good_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
259 {"mac_rx_bad_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
261 {"mac_rx_good_oct_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
263 {"mac_rx_bad_oct_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
265 {"mac_rx_uni_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
267 {"mac_rx_multi_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
269 {"mac_rx_broad_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
271 {"mac_rx_undersize_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
273 {"mac_rx_oversize_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
275 {"mac_rx_64_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
277 {"mac_rx_65_127_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
279 {"mac_rx_128_255_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
281 {"mac_rx_256_511_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
283 {"mac_rx_512_1023_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
285 {"mac_rx_1024_1518_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
287 {"mac_rx_1519_2047_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
289 {"mac_rx_2048_4095_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
291 {"mac_rx_4096_8191_oct_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
293 {"mac_rx_8192_9216_oct_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
295 {"mac_rx_9217_12287_oct_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
297 {"mac_rx_12288_16383_oct_pkt_num",
298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
299 {"mac_rx_1519_max_good_pkt_num",
300 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
301 {"mac_rx_1519_max_bad_pkt_num",
302 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
304 {"mac_tx_fragment_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
306 {"mac_tx_undermin_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
308 {"mac_tx_jabber_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
310 {"mac_tx_err_all_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
312 {"mac_tx_from_app_good_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
314 {"mac_tx_from_app_bad_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
316 {"mac_rx_fragment_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
318 {"mac_rx_undermin_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
320 {"mac_rx_jabber_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
322 {"mac_rx_fcs_err_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
324 {"mac_rx_send_app_good_pkt_num",
325 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
326 {"mac_rx_send_app_bad_pkt_num",
327 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
330 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
332 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
333 .ethter_type = cpu_to_le16(ETH_P_LLDP),
334 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
335 .i_port_bitmap = 0x1,
339 static const u8 hclge_hash_key[] = {
340 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
341 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
342 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
343 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
344 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
347 static const u32 hclge_dfx_bd_offset_list[] = {
348 HCLGE_DFX_BIOS_BD_OFFSET,
349 HCLGE_DFX_SSU_0_BD_OFFSET,
350 HCLGE_DFX_SSU_1_BD_OFFSET,
351 HCLGE_DFX_IGU_BD_OFFSET,
352 HCLGE_DFX_RPU_0_BD_OFFSET,
353 HCLGE_DFX_RPU_1_BD_OFFSET,
354 HCLGE_DFX_NCSI_BD_OFFSET,
355 HCLGE_DFX_RTC_BD_OFFSET,
356 HCLGE_DFX_PPP_BD_OFFSET,
357 HCLGE_DFX_RCB_BD_OFFSET,
358 HCLGE_DFX_TQP_BD_OFFSET,
359 HCLGE_DFX_SSU_2_BD_OFFSET
362 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
363 HCLGE_OPC_DFX_BIOS_COMMON_REG,
364 HCLGE_OPC_DFX_SSU_REG_0,
365 HCLGE_OPC_DFX_SSU_REG_1,
366 HCLGE_OPC_DFX_IGU_EGU_REG,
367 HCLGE_OPC_DFX_RPU_REG_0,
368 HCLGE_OPC_DFX_RPU_REG_1,
369 HCLGE_OPC_DFX_NCSI_REG,
370 HCLGE_OPC_DFX_RTC_REG,
371 HCLGE_OPC_DFX_PPP_REG,
372 HCLGE_OPC_DFX_RCB_REG,
373 HCLGE_OPC_DFX_TQP_REG,
374 HCLGE_OPC_DFX_SSU_REG_2
377 static const struct key_info meta_data_key_info[] = {
378 { PACKET_TYPE_ID, 6 },
385 { TUNNEL_PACKET, 1 },
388 static const struct key_info tuple_key_info[] = {
389 { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
390 { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
391 { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
392 { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
393 { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
394 { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
395 { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
396 { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
397 { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
398 { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
399 { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
400 { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
401 { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
402 { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
403 { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
404 { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
405 { INNER_DST_MAC, 48, KEY_OPT_MAC,
406 offsetof(struct hclge_fd_rule, tuples.dst_mac),
407 offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
408 { INNER_SRC_MAC, 48, KEY_OPT_MAC,
409 offsetof(struct hclge_fd_rule, tuples.src_mac),
410 offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
411 { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
412 offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
413 offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
414 { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
415 { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
416 offsetof(struct hclge_fd_rule, tuples.ether_proto),
417 offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
418 { INNER_L2_RSV, 16, KEY_OPT_LE16,
419 offsetof(struct hclge_fd_rule, tuples.l2_user_def),
420 offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
421 { INNER_IP_TOS, 8, KEY_OPT_U8,
422 offsetof(struct hclge_fd_rule, tuples.ip_tos),
423 offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
424 { INNER_IP_PROTO, 8, KEY_OPT_U8,
425 offsetof(struct hclge_fd_rule, tuples.ip_proto),
426 offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
427 { INNER_SRC_IP, 32, KEY_OPT_IP,
428 offsetof(struct hclge_fd_rule, tuples.src_ip),
429 offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
430 { INNER_DST_IP, 32, KEY_OPT_IP,
431 offsetof(struct hclge_fd_rule, tuples.dst_ip),
432 offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
433 { INNER_L3_RSV, 16, KEY_OPT_LE16,
434 offsetof(struct hclge_fd_rule, tuples.l3_user_def),
435 offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
436 { INNER_SRC_PORT, 16, KEY_OPT_LE16,
437 offsetof(struct hclge_fd_rule, tuples.src_port),
438 offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
439 { INNER_DST_PORT, 16, KEY_OPT_LE16,
440 offsetof(struct hclge_fd_rule, tuples.dst_port),
441 offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
442 { INNER_L4_RSV, 32, KEY_OPT_LE32,
443 offsetof(struct hclge_fd_rule, tuples.l4_user_def),
444 offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
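/* Read the MAC statistics using the legacy fixed-length command: the
 * firmware returns the counters in HCLGE_MAC_CMD_NUM descriptors, and
 * each u64 is accumulated into hdev->mac_stats.
 */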
447 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
449 #define HCLGE_MAC_CMD_NUM 21
451 u64 *data = (u64 *)(&hdev->mac_stats);
452 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
457 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
458 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
460 dev_err(&hdev->pdev->dev,
461 "Get MAC pkt stats fail, status = %d.\n", ret);
466 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
467 /* for special opcode 0032, only the first desc has the head */
468 if (unlikely(i == 0)) {
469 desc_data = (__le64 *)(&desc[i].data[0]);
470 n = HCLGE_RD_FIRST_STATS_NUM;
472 desc_data = (__le64 *)(&desc[i]);
473 n = HCLGE_RD_OTHER_STATS_NUM;
476 for (k = 0; k < n; k++) {
477 *data += le64_to_cpu(*desc_data);
486 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
488 u64 *data = (u64 *)(&hdev->mac_stats);
489 struct hclge_desc *desc;
/* This may be called inside atomic sections,
 * so GFP_ATOMIC is more suitable here
 */
497 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
501 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
502 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
508 for (i = 0; i < desc_num; i++) {
509 /* for special opcode 0034, only the first desc has the head */
511 desc_data = (__le64 *)(&desc[i].data[0]);
512 n = HCLGE_RD_FIRST_STATS_NUM;
514 desc_data = (__le64 *)(&desc[i]);
515 n = HCLGE_RD_OTHER_STATS_NUM;
518 for (k = 0; k < n; k++) {
519 *data += le64_to_cpu(*desc_data);
530 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
532 struct hclge_desc desc;
537 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
538 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
542 desc_data = (__le32 *)(&desc.data[0]);
543 reg_num = le32_to_cpu(*desc_data);
545 *desc_num = 1 + ((reg_num - 3) >> 2) +
546 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
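/* Update the MAC statistics: use the variable-length query when the
 * firmware reports a register number, and fall back to the legacy
 * fixed-length command when the query returns -EOPNOTSUPP.
 */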
551 static int hclge_mac_update_stats(struct hclge_dev *hdev)
556 ret = hclge_mac_query_reg_num(hdev, &desc_num);
557 /* The firmware supports the new statistics acquisition method */
559 ret = hclge_mac_update_stats_complete(hdev, desc_num);
560 else if (ret == -EOPNOTSUPP)
561 ret = hclge_mac_update_stats_defective(hdev);
563 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
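/* Accumulate the per-queue RX/TX packet counters of every TQP of this
 * handle by sending HCLGE_OPC_QUERY_RX_STATS/HCLGE_OPC_QUERY_TX_STATS
 * for each queue index.
 */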
568 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
570 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
571 struct hclge_vport *vport = hclge_get_vport(handle);
572 struct hclge_dev *hdev = vport->back;
573 struct hnae3_queue *queue;
574 struct hclge_desc desc[1];
575 struct hclge_tqp *tqp;
578 for (i = 0; i < kinfo->num_tqps; i++) {
579 queue = handle->kinfo.tqp[i];
580 tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_RX_STATS */
582 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
585 desc[0].data[0] = cpu_to_le32(tqp->index);
586 ret = hclge_cmd_send(&hdev->hw, desc, 1);
588 dev_err(&hdev->pdev->dev,
589 "Query tqp stat fail, status = %d,queue = %d\n",
593 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
594 le32_to_cpu(desc[0].data[1]);
597 for (i = 0; i < kinfo->num_tqps; i++) {
598 queue = handle->kinfo.tqp[i];
599 tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_TX_STATS */
601 hclge_cmd_setup_basic_desc(&desc[0],
602 HCLGE_OPC_QUERY_TX_STATS,
605 desc[0].data[0] = cpu_to_le32(tqp->index);
606 ret = hclge_cmd_send(&hdev->hw, desc, 1);
608 dev_err(&hdev->pdev->dev,
609 "Query tqp stat fail, status = %d,queue = %d\n",
613 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
614 le32_to_cpu(desc[0].data[1]);
620 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
622 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
623 struct hclge_tqp *tqp;
627 for (i = 0; i < kinfo->num_tqps; i++) {
628 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
629 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
632 for (i = 0; i < kinfo->num_tqps; i++) {
633 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
634 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
640 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
642 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
/* each tqp has both a TX queue and an RX queue */
645 return kinfo->num_tqps * (2);
648 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
650 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
654 for (i = 0; i < kinfo->num_tqps; i++) {
655 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
656 struct hclge_tqp, q);
657 snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
659 buff = buff + ETH_GSTRING_LEN;
662 for (i = 0; i < kinfo->num_tqps; i++) {
663 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
664 struct hclge_tqp, q);
665 snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
667 buff = buff + ETH_GSTRING_LEN;
673 static u64 *hclge_comm_get_stats(const void *comm_stats,
674 const struct hclge_comm_stats_str strs[],
680 for (i = 0; i < size; i++)
681 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
686 static u8 *hclge_comm_get_strings(u32 stringset,
687 const struct hclge_comm_stats_str strs[],
690 char *buff = (char *)data;
693 if (stringset != ETH_SS_STATS)
696 for (i = 0; i < size; i++) {
697 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
698 buff = buff + ETH_GSTRING_LEN;
704 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
706 struct hnae3_handle *handle;
709 handle = &hdev->vport[0].nic;
710 if (handle->client) {
711 status = hclge_tqps_update_stats(handle);
713 dev_err(&hdev->pdev->dev,
714 "Update TQPS stats fail, status = %d.\n",
719 status = hclge_mac_update_stats(hdev);
721 dev_err(&hdev->pdev->dev,
722 "Update MAC stats fail, status = %d.\n", status);
725 static void hclge_update_stats(struct hnae3_handle *handle,
726 struct net_device_stats *net_stats)
728 struct hclge_vport *vport = hclge_get_vport(handle);
729 struct hclge_dev *hdev = vport->back;
732 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
735 status = hclge_mac_update_stats(hdev);
737 dev_err(&hdev->pdev->dev,
738 "Update MAC stats fail, status = %d.\n",
741 status = hclge_tqps_update_stats(handle);
743 dev_err(&hdev->pdev->dev,
744 "Update TQPS stats fail, status = %d.\n",
747 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
750 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
752 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
753 HNAE3_SUPPORT_PHY_LOOPBACK | \
754 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
755 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
757 struct hclge_vport *vport = hclge_get_vport(handle);
758 struct hclge_dev *hdev = vport->back;
/* Loopback test support rules:
 * mac: only GE mode is supported
 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
 * phy: only supported when a phy device exists on the board
 */
766 if (stringset == ETH_SS_TEST) {
767 /* clear loopback bit flags at first */
768 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
769 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
770 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
771 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
772 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
774 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
778 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
779 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
781 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
782 hdev->hw.mac.phydev->drv->set_loopback) ||
783 hnae3_dev_phy_imp_supported(hdev)) {
785 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
787 } else if (stringset == ETH_SS_STATS) {
788 count = ARRAY_SIZE(g_mac_stats_string) +
789 hclge_tqps_get_sset_count(handle, stringset);
795 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
798 u8 *p = (char *)data;
801 if (stringset == ETH_SS_STATS) {
802 size = ARRAY_SIZE(g_mac_stats_string);
803 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
805 p = hclge_tqps_get_strings(handle, p);
806 } else if (stringset == ETH_SS_TEST) {
807 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
808 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
810 p += ETH_GSTRING_LEN;
812 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
813 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
815 p += ETH_GSTRING_LEN;
817 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
819 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
821 p += ETH_GSTRING_LEN;
823 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
824 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
826 p += ETH_GSTRING_LEN;
831 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
833 struct hclge_vport *vport = hclge_get_vport(handle);
834 struct hclge_dev *hdev = vport->back;
837 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
838 ARRAY_SIZE(g_mac_stats_string), data);
839 p = hclge_tqps_get_stats(handle, p);
842 static void hclge_get_mac_stat(struct hnae3_handle *handle,
843 struct hns3_mac_stats *mac_stats)
845 struct hclge_vport *vport = hclge_get_vport(handle);
846 struct hclge_dev *hdev = vport->back;
848 hclge_update_stats(handle, NULL);
850 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
851 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
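/* Parse the function status response: record whether this PF is the
 * main PF and extract the MAC id assigned to it.
 */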
854 static int hclge_parse_func_status(struct hclge_dev *hdev,
855 struct hclge_func_status_cmd *status)
857 #define HCLGE_MAC_ID_MASK 0xF
859 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
/* Record whether this PF is the main PF */
863 if (status->pf_state & HCLGE_PF_STATE_MAIN)
864 hdev->flag |= HCLGE_FLAG_MAIN;
866 hdev->flag &= ~HCLGE_FLAG_MAIN;
868 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
872 static int hclge_query_function_status(struct hclge_dev *hdev)
874 #define HCLGE_QUERY_MAX_CNT 5
876 struct hclge_func_status_cmd *req;
877 struct hclge_desc desc;
881 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
882 req = (struct hclge_func_status_cmd *)desc.data;
885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
887 dev_err(&hdev->pdev->dev,
888 "query function status failed %d.\n", ret);
/* Check whether the PF reset is done */
895 usleep_range(1000, 2000);
896 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
898 return hclge_parse_func_status(hdev, req);
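/* Query the PF resources: TQP numbers, packet/tx/dv buffer sizes and
 * the number of NIC (and, when RoCE is supported, RoCE) MSI-X vectors.
 */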
901 static int hclge_query_pf_resource(struct hclge_dev *hdev)
903 struct hclge_pf_res_cmd *req;
904 struct hclge_desc desc;
907 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
908 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
910 dev_err(&hdev->pdev->dev,
911 "query pf resource failed %d.\n", ret);
915 req = (struct hclge_pf_res_cmd *)desc.data;
916 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
917 le16_to_cpu(req->ext_tqp_num);
918 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
920 if (req->tx_buf_size)
922 le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
924 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
926 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
928 if (req->dv_buf_size)
930 le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
932 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
934 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
936 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
937 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
938 dev_err(&hdev->pdev->dev,
939 "only %u msi resources available, not enough for pf(min:2).\n",
944 if (hnae3_dev_roce_supported(hdev)) {
946 le16_to_cpu(req->pf_intr_vector_number_roce);
/* The PF should have both NIC vectors and RoCE vectors, with the
 * NIC vectors queued before the RoCE vectors.
 */
951 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
953 hdev->num_msi = hdev->num_nic_msi;
959 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
962 case HCLGE_FW_MAC_SPEED_10M:
963 *speed = HCLGE_MAC_SPEED_10M;
965 case HCLGE_FW_MAC_SPEED_100M:
966 *speed = HCLGE_MAC_SPEED_100M;
968 case HCLGE_FW_MAC_SPEED_1G:
969 *speed = HCLGE_MAC_SPEED_1G;
971 case HCLGE_FW_MAC_SPEED_10G:
972 *speed = HCLGE_MAC_SPEED_10G;
974 case HCLGE_FW_MAC_SPEED_25G:
975 *speed = HCLGE_MAC_SPEED_25G;
977 case HCLGE_FW_MAC_SPEED_40G:
978 *speed = HCLGE_MAC_SPEED_40G;
980 case HCLGE_FW_MAC_SPEED_50G:
981 *speed = HCLGE_MAC_SPEED_50G;
983 case HCLGE_FW_MAC_SPEED_100G:
984 *speed = HCLGE_MAC_SPEED_100G;
986 case HCLGE_FW_MAC_SPEED_200G:
987 *speed = HCLGE_MAC_SPEED_200G;
996 static const struct hclge_speed_bit_map speed_bit_map[] = {
997 {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
998 {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
999 {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
1000 {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
1001 {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
1002 {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
1003 {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
1004 {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
1005 {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
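/* Look up the ability bit for a given speed in speed_bit_map; used by
 * hclge_check_port_speed() below to test the bit against
 * hdev->hw.mac.speed_ability.
 */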
1008 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
1012 for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
1013 if (speed == speed_bit_map[i].speed) {
1014 *speed_bit = speed_bit_map[i].speed_bit;
1022 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
1024 struct hclge_vport *vport = hclge_get_vport(handle);
1025 struct hclge_dev *hdev = vport->back;
1026 u32 speed_ability = hdev->hw.mac.speed_ability;
1030 ret = hclge_get_speed_bit(speed, &speed_bit);
1034 if (speed_bit & speed_ability)
1040 static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
1042 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1045 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1046 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1048 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1051 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1052 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1054 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1057 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1062 static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
1064 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1065 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1067 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1068 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1070 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1071 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1073 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1074 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1076 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1079 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1081 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1085 static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
1087 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1088 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1090 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1091 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1093 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1094 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1096 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1097 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1099 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1100 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1102 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1103 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1107 static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
1109 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1110 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1112 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1113 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1115 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1116 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1118 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1119 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1121 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1122 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1124 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1125 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1127 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1128 linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1132 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1134 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1135 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1137 switch (mac->speed) {
1138 case HCLGE_MAC_SPEED_10G:
1139 case HCLGE_MAC_SPEED_40G:
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1143 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1145 case HCLGE_MAC_SPEED_25G:
1146 case HCLGE_MAC_SPEED_50G:
1147 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1150 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1151 BIT(HNAE3_FEC_AUTO);
1153 case HCLGE_MAC_SPEED_100G:
1154 case HCLGE_MAC_SPEED_200G:
1155 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1156 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1159 mac->fec_ability = 0;
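/* Build the ethtool supported link modes of a fiber port from the
 * speed ability bits reported by the firmware, plus the FEC and pause
 * bits when the device supports them.
 */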
1164 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1167 struct hclge_mac *mac = &hdev->hw.mac;
1169 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1170 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1173 hclge_convert_setting_sr(mac, speed_ability);
1174 hclge_convert_setting_lr(mac, speed_ability);
1175 hclge_convert_setting_cr(mac, speed_ability);
1176 if (hnae3_dev_fec_supported(hdev))
1177 hclge_convert_setting_fec(mac);
1179 if (hnae3_dev_pause_supported(hdev))
1180 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1183 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1186 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1189 struct hclge_mac *mac = &hdev->hw.mac;
1191 hclge_convert_setting_kr(mac, speed_ability);
1192 if (hnae3_dev_fec_supported(hdev))
1193 hclge_convert_setting_fec(mac);
1195 if (hnae3_dev_pause_supported(hdev))
1196 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1198 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1199 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1202 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1205 unsigned long *supported = hdev->hw.mac.supported;
/* default to supporting all speeds for a GE port */
1209 speed_ability = HCLGE_SUPPORT_GE;
1211 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1212 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1215 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1216 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1218 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1222 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1223 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1224 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1227 if (hnae3_dev_pause_supported(hdev)) {
1228 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1229 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1232 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1233 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1236 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1238 u8 media_type = hdev->hw.mac.media_type;
1240 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1241 hclge_parse_fiber_link_mode(hdev, speed_ability);
1242 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1243 hclge_parse_copper_link_mode(hdev, speed_ability);
1244 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1245 hclge_parse_backplane_link_mode(hdev, speed_ability);
1248 static u32 hclge_get_max_speed(u16 speed_ability)
1250 if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1251 return HCLGE_MAC_SPEED_200G;
1253 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1254 return HCLGE_MAC_SPEED_100G;
1256 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1257 return HCLGE_MAC_SPEED_50G;
1259 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1260 return HCLGE_MAC_SPEED_40G;
1262 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1263 return HCLGE_MAC_SPEED_25G;
1265 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1266 return HCLGE_MAC_SPEED_10G;
1268 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1269 return HCLGE_MAC_SPEED_1G;
1271 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1272 return HCLGE_MAC_SPEED_100M;
1274 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1275 return HCLGE_MAC_SPEED_10M;
1277 return HCLGE_MAC_SPEED_1G;
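/* Unpack the configuration parameters carried by the two
 * HCLGE_OPC_GET_CFG_PARAM descriptors: TC number, queue descriptor
 * number, PHY address, media type, rx buffer length, MAC address,
 * default speed, RSS sizes, speed ability, VLAN filter capability,
 * UMV space and tx spare buffer size.
 */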
1280 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1282 #define HCLGE_TX_SPARE_SIZE_UNIT 4096
1283 #define SPEED_ABILITY_EXT_SHIFT 8
1285 struct hclge_cfg_param_cmd *req;
1286 u64 mac_addr_tmp_high;
1287 u16 speed_ability_ext;
1291 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1293 /* get the configuration */
1294 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1295 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1296 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1297 HCLGE_CFG_TQP_DESC_N_M,
1298 HCLGE_CFG_TQP_DESC_N_S);
1300 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301 HCLGE_CFG_PHY_ADDR_M,
1302 HCLGE_CFG_PHY_ADDR_S);
1303 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304 HCLGE_CFG_MEDIA_TP_M,
1305 HCLGE_CFG_MEDIA_TP_S);
1306 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1307 HCLGE_CFG_RX_BUF_LEN_M,
1308 HCLGE_CFG_RX_BUF_LEN_S);
1309 /* get mac_address */
1310 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1311 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1312 HCLGE_CFG_MAC_ADDR_H_M,
1313 HCLGE_CFG_MAC_ADDR_H_S);
1315 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1317 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1318 HCLGE_CFG_DEFAULT_SPEED_M,
1319 HCLGE_CFG_DEFAULT_SPEED_S);
1320 cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1321 HCLGE_CFG_RSS_SIZE_M,
1322 HCLGE_CFG_RSS_SIZE_S);
1324 for (i = 0; i < ETH_ALEN; i++)
1325 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1327 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1328 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1330 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1331 HCLGE_CFG_SPEED_ABILITY_M,
1332 HCLGE_CFG_SPEED_ABILITY_S);
1333 speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1334 HCLGE_CFG_SPEED_ABILITY_EXT_M,
1335 HCLGE_CFG_SPEED_ABILITY_EXT_S);
1336 cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1338 cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1339 HCLGE_CFG_VLAN_FLTR_CAP_M,
1340 HCLGE_CFG_VLAN_FLTR_CAP_S);
1342 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1343 HCLGE_CFG_UMV_TBL_SPACE_M,
1344 HCLGE_CFG_UMV_TBL_SPACE_S);
1345 if (!cfg->umv_space)
1346 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1348 cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1349 HCLGE_CFG_PF_RSS_SIZE_M,
1350 HCLGE_CFG_PF_RSS_SIZE_S);
/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
 * exponent instead of the value itself, which is more flexible for
 * future changes and expansions. If the PF's field is 0, the PF uses
 * the same max rss size as the VF, i.e. the HCLGE_CFG_RSS_SIZE_S
 * field.
 */
1359 cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1360 1U << cfg->pf_rss_size_max :
1361 cfg->vf_rss_size_max;
/* The tx spare buffer size queried from the configuration file is in
 * units of HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion to
 * bytes is needed here.
 */
1367 cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1368 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1369 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1370 cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
1377 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1379 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1380 struct hclge_cfg_param_cmd *req;
1384 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1387 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1388 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1390 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1391 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
/* Length should be in units of 4 bytes when sent to hardware */
1393 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1394 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1395 req->offset = cpu_to_le32(offset);
1398 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1400 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1404 hclge_parse_cfg(hcfg, desc);
1409 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1411 #define HCLGE_MAX_NON_TSO_BD_NUM 8U
1413 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1415 ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1416 ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1417 ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
1418 ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1419 ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1420 ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1421 ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1424 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1425 struct hclge_desc *desc)
1427 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1428 struct hclge_dev_specs_0_cmd *req0;
1429 struct hclge_dev_specs_1_cmd *req1;
1431 req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1432 req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1434 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1435 ae_dev->dev_specs.rss_ind_tbl_size =
1436 le16_to_cpu(req0->rss_ind_tbl_size);
1437 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1438 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1439 ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1440 ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1441 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1442 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1445 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1447 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1449 if (!dev_specs->max_non_tso_bd_num)
1450 dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1451 if (!dev_specs->rss_ind_tbl_size)
1452 dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1453 if (!dev_specs->rss_key_size)
1454 dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
1455 if (!dev_specs->max_tm_rate)
1456 dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1457 if (!dev_specs->max_qset_num)
1458 dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1459 if (!dev_specs->max_int_gl)
1460 dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1461 if (!dev_specs->max_frm_size)
1462 dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
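/* Query the device specifications from the firmware (supported from
 * device version V3); older devices use the compile-time defaults, and
 * any field the firmware leaves at zero is also filled with its
 * default.
 */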
1465 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1467 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
/* set default specifications, as devices below version V3 do not
 * support querying specifications from firmware.
 */
1474 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1475 hclge_set_default_dev_specs(hdev);
1479 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1480 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1482 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1484 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1486 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1490 hclge_parse_dev_specs(hdev, desc);
1491 hclge_check_dev_specs(hdev);
1496 static int hclge_get_cap(struct hclge_dev *hdev)
1500 ret = hclge_query_function_status(hdev);
1502 dev_err(&hdev->pdev->dev,
1503 "query function status error %d.\n", ret);
1507 /* get pf resource */
1508 return hclge_query_pf_resource(hdev);
1511 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1513 #define HCLGE_MIN_TX_DESC 64
1514 #define HCLGE_MIN_RX_DESC 64
1516 if (!is_kdump_kernel())
1519 dev_info(&hdev->pdev->dev,
1520 "Running kdump kernel. Using minimal resources\n");
/* the minimal number of queue pairs equals the number of vports */
1523 hdev->num_tqps = hdev->num_req_vfs + 1;
1524 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1525 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1528 static int hclge_configure(struct hclge_dev *hdev)
1530 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1531 struct hclge_cfg cfg;
1535 ret = hclge_get_cfg(hdev, &cfg);
1539 hdev->base_tqp_pid = 0;
1540 hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1541 hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1542 hdev->rx_buf_len = cfg.rx_buf_len;
1543 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1544 hdev->hw.mac.media_type = cfg.media_type;
1545 hdev->hw.mac.phy_addr = cfg.phy_addr;
1546 hdev->num_tx_desc = cfg.tqp_desc_num;
1547 hdev->num_rx_desc = cfg.tqp_desc_num;
1548 hdev->tm_info.num_pg = 1;
1549 hdev->tc_max = cfg.tc_num;
1550 hdev->tm_info.hw_pfc_map = 0;
1551 hdev->wanted_umv_size = cfg.umv_space;
1552 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1553 hdev->gro_en = true;
1554 if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1555 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1557 if (hnae3_dev_fd_supported(hdev)) {
1559 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1562 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1564 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1565 cfg.default_speed, ret);
1569 hclge_parse_link_mode(hdev, cfg.speed_ability);
1571 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1573 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1574 (hdev->tc_max < 1)) {
1575 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1580 /* Dev does not support DCB */
1581 if (!hnae3_dev_dcb_supported(hdev)) {
1585 hdev->pfc_max = hdev->tc_max;
1588 hdev->tm_info.num_tc = 1;
/* Currently, non-contiguous TCs are not supported */
1591 for (i = 0; i < hdev->tm_info.num_tc; i++)
1592 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1594 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1596 hclge_init_kdump_kernel_config(hdev);
/* Set the initial affinity based on the PCI function number */
1599 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1600 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1601 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1602 &hdev->affinity_mask);
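/* Program the minimum and maximum TSO MSS into the hardware via the
 * HCLGE_OPC_TSO_GENERIC_CONFIG command.
 */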
1607 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1610 struct hclge_cfg_tso_status_cmd *req;
1611 struct hclge_desc desc;
1613 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1615 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1616 req->tso_mss_min = cpu_to_le16(tso_mss_min);
1617 req->tso_mss_max = cpu_to_le16(tso_mss_max);
1619 return hclge_cmd_send(&hdev->hw, &desc, 1);
1622 static int hclge_config_gro(struct hclge_dev *hdev)
1624 struct hclge_cfg_gro_status_cmd *req;
1625 struct hclge_desc desc;
1628 if (!hnae3_dev_gro_supported(hdev))
1631 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1632 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1634 req->gro_en = hdev->gro_en ? 1 : 0;
1636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1638 dev_err(&hdev->pdev->dev,
1639 "GRO hardware config cmd failed, ret = %d\n", ret);
1644 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1646 struct hclge_tqp *tqp;
1649 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1650 sizeof(struct hclge_tqp), GFP_KERNEL);
1656 for (i = 0; i < hdev->num_tqps; i++) {
1657 tqp->dev = &hdev->pdev->dev;
1660 tqp->q.ae_algo = &ae_algo;
1661 tqp->q.buf_size = hdev->rx_buf_len;
1662 tqp->q.tx_desc_num = hdev->num_tx_desc;
1663 tqp->q.rx_desc_num = hdev->num_rx_desc;
/* an extended offset is needed to configure queues with an index of
 * HCLGE_TQP_MAX_SIZE_DEV_V2 or above
 */
1668 if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1669 tqp->q.io_base = hdev->hw.io_base +
1670 HCLGE_TQP_REG_OFFSET +
1671 i * HCLGE_TQP_REG_SIZE;
1673 tqp->q.io_base = hdev->hw.io_base +
1674 HCLGE_TQP_REG_OFFSET +
1675 HCLGE_TQP_EXT_REG_OFFSET +
1676 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1685 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1686 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1688 struct hclge_tqp_map_cmd *req;
1689 struct hclge_desc desc;
1692 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1694 req = (struct hclge_tqp_map_cmd *)desc.data;
1695 req->tqp_id = cpu_to_le16(tqp_pid);
1696 req->tqp_vf = func_id;
1697 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1699 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1700 req->tqp_vid = cpu_to_le16(tqp_vid);
1702 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1704 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1709 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1711 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1712 struct hclge_dev *hdev = vport->back;
1715 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1716 alloced < num_tqps; i++) {
1717 if (!hdev->htqp[i].alloced) {
1718 hdev->htqp[i].q.handle = &vport->nic;
1719 hdev->htqp[i].q.tqp_index = alloced;
1720 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1721 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1722 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1723 hdev->htqp[i].alloced = true;
1727 vport->alloc_tqps = alloced;
1728 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1729 vport->alloc_tqps / hdev->tm_info.num_tc);
/* ensure a one-to-one mapping between irq and queue by default */
1732 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1733 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
1738 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1739 u16 num_tx_desc, u16 num_rx_desc)
1742 struct hnae3_handle *nic = &vport->nic;
1743 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1744 struct hclge_dev *hdev = vport->back;
1747 kinfo->num_tx_desc = num_tx_desc;
1748 kinfo->num_rx_desc = num_rx_desc;
1750 kinfo->rx_buf_len = hdev->rx_buf_len;
1751 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1753 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1754 sizeof(struct hnae3_queue *), GFP_KERNEL);
1758 ret = hclge_assign_tqp(vport, num_tqps);
1760 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1765 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1766 struct hclge_vport *vport)
1768 struct hnae3_handle *nic = &vport->nic;
1769 struct hnae3_knic_private_info *kinfo;
1772 kinfo = &nic->kinfo;
1773 for (i = 0; i < vport->alloc_tqps; i++) {
1774 struct hclge_tqp *q =
1775 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1779 is_pf = !(vport->vport_id);
1780 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1789 static int hclge_map_tqp(struct hclge_dev *hdev)
1791 struct hclge_vport *vport = hdev->vport;
1794 num_vport = hdev->num_req_vfs + 1;
1795 for (i = 0; i < num_vport; i++) {
1798 ret = hclge_map_tqp_to_vport(hdev, vport);
1808 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1810 struct hnae3_handle *nic = &vport->nic;
1811 struct hclge_dev *hdev = vport->back;
1814 nic->pdev = hdev->pdev;
1815 nic->ae_algo = &ae_algo;
1816 nic->numa_node_mask = hdev->numa_node_mask;
1817 nic->kinfo.io_base = hdev->hw.io_base;
1819 ret = hclge_knic_setup(vport, num_tqps,
1820 hdev->num_tx_desc, hdev->num_rx_desc);
1822 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1827 static int hclge_alloc_vport(struct hclge_dev *hdev)
1829 struct pci_dev *pdev = hdev->pdev;
1830 struct hclge_vport *vport;
1836 /* We need to alloc a vport for main NIC of PF */
1837 num_vport = hdev->num_req_vfs + 1;
1839 if (hdev->num_tqps < num_vport) {
1840 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1841 hdev->num_tqps, num_vport);
1845 /* Alloc the same number of TQPs for every vport */
1846 tqp_per_vport = hdev->num_tqps / num_vport;
1847 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1849 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1854 hdev->vport = vport;
1855 hdev->num_alloc_vport = num_vport;
1857 if (IS_ENABLED(CONFIG_PCI_IOV))
1858 hdev->num_alloc_vfs = hdev->num_req_vfs;
1860 for (i = 0; i < num_vport; i++) {
1862 vport->vport_id = i;
1863 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1864 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1865 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1866 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1867 vport->req_vlan_fltr_en = true;
1868 INIT_LIST_HEAD(&vport->vlan_list);
1869 INIT_LIST_HEAD(&vport->uc_mac_list);
1870 INIT_LIST_HEAD(&vport->mc_mac_list);
1871 spin_lock_init(&vport->mac_list_lock);
1874 ret = hclge_vport_setup(vport, tqp_main_vport);
1876 ret = hclge_vport_setup(vport, tqp_per_vport);
1879 "vport setup failed for vport %d, %d\n",
1890 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1891 struct hclge_pkt_buf_alloc *buf_alloc)
/* TX buffer size is in units of 128 bytes */
1894 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1895 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1896 struct hclge_tx_buff_alloc_cmd *req;
1897 struct hclge_desc desc;
1901 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1903 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1904 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1905 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1907 req->tx_pkt_buff[i] =
1908 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1909 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1912 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1914 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1920 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1921 struct hclge_pkt_buf_alloc *buf_alloc)
1923 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1926 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1931 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1936 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1937 if (hdev->hw_tc_map & BIT(i))
/* Get the number of PFC-enabled TCs, which have a private buffer */
1943 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1944 struct hclge_pkt_buf_alloc *buf_alloc)
1946 struct hclge_priv_buf *priv;
1950 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1951 priv = &buf_alloc->priv_buf[i];
1952 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Get the number of PFC-disabled TCs, which have a private buffer */
1961 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1962 struct hclge_pkt_buf_alloc *buf_alloc)
1964 struct hclge_priv_buf *priv;
1968 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1969 priv = &buf_alloc->priv_buf[i];
1970 if (hdev->hw_tc_map & BIT(i) &&
1971 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1979 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1981 struct hclge_priv_buf *priv;
1985 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1986 priv = &buf_alloc->priv_buf[i];
1988 rx_priv += priv->buf_size;
1993 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1995 u32 i, total_tx_size = 0;
1997 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1998 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2000 return total_tx_size;
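/* Check whether rx_all can hold the already assigned private buffers
 * plus a shared buffer; if so, size the shared buffer and compute its
 * high/low watermarks and per-TC thresholds.
 */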
2003 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2004 struct hclge_pkt_buf_alloc *buf_alloc,
2007 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2008 u32 tc_num = hclge_get_tc_num(hdev);
2009 u32 shared_buf, aligned_mps;
2013 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2015 if (hnae3_dev_dcb_supported(hdev))
2016 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2019 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2020 + hdev->dv_buf_size;
2022 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2023 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2024 HCLGE_BUF_SIZE_UNIT);
2026 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2027 if (rx_all < rx_priv + shared_std)
2030 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2031 buf_alloc->s_buf.buf_size = shared_buf;
2032 if (hnae3_dev_dcb_supported(hdev)) {
2033 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2034 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2035 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2036 HCLGE_BUF_SIZE_UNIT);
2038 buf_alloc->s_buf.self.high = aligned_mps +
2039 HCLGE_NON_DCB_ADDITIONAL_BUF;
2040 buf_alloc->s_buf.self.low = aligned_mps;
2043 if (hnae3_dev_dcb_supported(hdev)) {
2044 hi_thrd = shared_buf - hdev->dv_buf_size;
2046 if (tc_num <= NEED_RESERVE_TC_NUM)
2047 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2051 hi_thrd = hi_thrd / tc_num;
2053 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2054 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2055 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2057 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2058 lo_thrd = aligned_mps;
2061 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2062 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2063 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
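/* Purely illustrative numbers for the check above: with an MPS of 1518
 * bytes, aligned_mps rounds up to 1536. On a DCB-capable device with four
 * TCs enabled, shared_buf_tc = 4 * 1536 + 1536 = 7680 bytes and
 * shared_buf_min = 2 * 1536 + dv_buf_size; shared_std is the larger of the
 * two rounded up to HCLGE_BUF_SIZE_UNIT. The layout only fits when
 * rx_all >= rx_priv + shared_std, and whatever is left after the private
 * buffers becomes the shared buffer.
 */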
2069 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2070 struct hclge_pkt_buf_alloc *buf_alloc)
2074 total_size = hdev->pkt_buf_size;
2076 /* allocate a TX buffer for every enabled TC */
2077 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2078 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2080 if (hdev->hw_tc_map & BIT(i)) {
2081 if (total_size < hdev->tx_buf_size)
2084 priv->tx_buf_size = hdev->tx_buf_size;
2086 priv->tx_buf_size = 0;
2089 total_size -= priv->tx_buf_size;
2095 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2096 struct hclge_pkt_buf_alloc *buf_alloc)
2098 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2099 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2102 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2103 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2110 if (!(hdev->hw_tc_map & BIT(i)))
2115 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2116 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2117 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2118 HCLGE_BUF_SIZE_UNIT);
2121 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2125 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2128 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2131 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2132 struct hclge_pkt_buf_alloc *buf_alloc)
2134 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2135 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2138 /* clear TCs starting from the last one */
2139 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2140 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2141 unsigned int mask = BIT((unsigned int)i);
2143 if (hdev->hw_tc_map & mask &&
2144 !(hdev->tm_info.hw_pfc_map & mask)) {
2145 /* Clear the private buffer of this non-PFC TC */
2153 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2154 no_pfc_priv_num == 0)
2158 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2161 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2162 struct hclge_pkt_buf_alloc *buf_alloc)
2164 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2165 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2168 /* clear TCs starting from the last one */
2169 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2170 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2171 unsigned int mask = BIT((unsigned int)i);
2173 if (hdev->hw_tc_map & mask &&
2174 hdev->tm_info.hw_pfc_map & mask) {
2175 /* Reduce the number of PFC TCs with a private buffer */
2183 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2188 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2191 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2192 struct hclge_pkt_buf_alloc *buf_alloc)
2194 #define COMPENSATE_BUFFER 0x3C00
2195 #define COMPENSATE_HALF_MPS_NUM 5
2196 #define PRIV_WL_GAP 0x1800
2198 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2199 u32 tc_num = hclge_get_tc_num(hdev);
2200 u32 half_mps = hdev->mps >> 1;
2205 rx_priv = rx_priv / tc_num;
2207 if (tc_num <= NEED_RESERVE_TC_NUM)
2208 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2210 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2211 COMPENSATE_HALF_MPS_NUM * half_mps;
2212 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2213 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2214 if (rx_priv < min_rx_priv)
2217 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2218 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2225 if (!(hdev->hw_tc_map & BIT(i)))
2229 priv->buf_size = rx_priv;
2230 priv->wl.high = rx_priv - hdev->dv_buf_size;
2231 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2234 buf_alloc->s_buf.buf_size = 0;
2239 /* hclge_rx_buffer_calc: calculate the RX private buffer size for all TCs
2240 * @hdev: pointer to struct hclge_dev
2241 * @buf_alloc: pointer to buffer calculation data
2242 * @return: 0 on success, a negative error code on failure
2244 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2245 struct hclge_pkt_buf_alloc *buf_alloc)
2247 /* When DCB is not supported, rx private buffer is not allocated. */
2248 if (!hnae3_dev_dcb_supported(hdev)) {
2249 u32 rx_all = hdev->pkt_buf_size;
2251 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2252 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2258 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2261 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2264 /* try to decrease the buffer size */
2265 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2268 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2271 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
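/* The calculation above tries progressively less demanding layouts: a
 * private-buffer-only split (hclge_only_alloc_priv_buff), private buffers
 * with large watermarks (hclge_rx_buf_calc_all with max set), then with
 * small watermarks, then dropping the private buffers of non-PFC TCs, and
 * finally dropping the private buffers of PFC TCs as well. If none of
 * these fits into the packet buffer, the function fails.
 */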
2277 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2278 struct hclge_pkt_buf_alloc *buf_alloc)
2280 struct hclge_rx_priv_buff_cmd *req;
2281 struct hclge_desc desc;
2285 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2286 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2288 /* Allocate the private buffer for each TC */
2289 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2290 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2293 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2295 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2299 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2300 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2302 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2304 dev_err(&hdev->pdev->dev,
2305 "rx private buffer alloc cmd failed %d\n", ret);
2310 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2311 struct hclge_pkt_buf_alloc *buf_alloc)
2313 struct hclge_rx_priv_wl_buf *req;
2314 struct hclge_priv_buf *priv;
2315 struct hclge_desc desc[2];
2319 for (i = 0; i < 2; i++) {
2320 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2322 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2324 /* The first descriptor sets the NEXT bit to 1 */
2326 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2328 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2330 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2331 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2333 priv = &buf_alloc->priv_buf[idx];
2334 req->tc_wl[j].high =
2335 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2336 req->tc_wl[j].high |=
2337 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2339 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2340 req->tc_wl[j].low |=
2341 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2345 /* Send 2 descriptors at one time */
2346 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2348 dev_err(&hdev->pdev->dev,
2349 "rx private waterline config cmd failed %d\n",
2354 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2355 struct hclge_pkt_buf_alloc *buf_alloc)
2357 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2358 struct hclge_rx_com_thrd *req;
2359 struct hclge_desc desc[2];
2360 struct hclge_tc_thrd *tc;
2364 for (i = 0; i < 2; i++) {
2365 hclge_cmd_setup_basic_desc(&desc[i],
2366 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2367 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2369 /* The first descriptor sets the NEXT bit to 1 */
2371 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2373 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2375 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2376 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2378 req->com_thrd[j].high =
2379 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2380 req->com_thrd[j].high |=
2381 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2382 req->com_thrd[j].low =
2383 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2384 req->com_thrd[j].low |=
2385 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2389 /* Send 2 descriptors at one time */
2390 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2392 dev_err(&hdev->pdev->dev,
2393 "common threshold config cmd failed %d\n", ret);
2397 static int hclge_common_wl_config(struct hclge_dev *hdev,
2398 struct hclge_pkt_buf_alloc *buf_alloc)
2400 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2401 struct hclge_rx_com_wl *req;
2402 struct hclge_desc desc;
2405 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2407 req = (struct hclge_rx_com_wl *)desc.data;
2408 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2409 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2411 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2412 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2414 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2416 dev_err(&hdev->pdev->dev,
2417 "common waterline config cmd failed %d\n", ret);
2422 int hclge_buffer_alloc(struct hclge_dev *hdev)
2424 struct hclge_pkt_buf_alloc *pkt_buf;
2427 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2431 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2433 dev_err(&hdev->pdev->dev,
2434 "could not calc tx buffer size for all TCs %d\n", ret);
2438 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2440 dev_err(&hdev->pdev->dev,
2441 "could not alloc tx buffers %d\n", ret);
2445 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2447 dev_err(&hdev->pdev->dev,
2448 "could not calc rx priv buffer size for all TCs %d\n",
2453 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2455 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2460 if (hnae3_dev_dcb_supported(hdev)) {
2461 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2463 dev_err(&hdev->pdev->dev,
2464 "could not configure rx private waterline %d\n",
2469 ret = hclge_common_thrd_config(hdev, pkt_buf);
2471 dev_err(&hdev->pdev->dev,
2472 "could not configure common threshold %d\n",
2478 ret = hclge_common_wl_config(hdev, pkt_buf);
2480 dev_err(&hdev->pdev->dev,
2481 "could not configure common waterline %d\n", ret);
2488 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2490 struct hnae3_handle *roce = &vport->roce;
2491 struct hnae3_handle *nic = &vport->nic;
2492 struct hclge_dev *hdev = vport->back;
2494 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2496 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2499 roce->rinfo.base_vector = hdev->roce_base_vector;
2501 roce->rinfo.netdev = nic->kinfo.netdev;
2502 roce->rinfo.roce_io_base = hdev->hw.io_base;
2503 roce->rinfo.roce_mem_base = hdev->hw.mem_base;
2505 roce->pdev = nic->pdev;
2506 roce->ae_algo = nic->ae_algo;
2507 roce->numa_node_mask = nic->numa_node_mask;
2512 static int hclge_init_msi(struct hclge_dev *hdev)
2514 struct pci_dev *pdev = hdev->pdev;
2518 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2520 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2523 "failed(%d) to allocate MSI/MSI-X vectors\n",
2527 if (vectors < hdev->num_msi)
2528 dev_warn(&hdev->pdev->dev,
2529 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2530 hdev->num_msi, vectors);
2532 hdev->num_msi = vectors;
2533 hdev->num_msi_left = vectors;
2535 hdev->base_msi_vector = pdev->irq;
2536 hdev->roce_base_vector = hdev->base_msi_vector +
2539 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2540 sizeof(u16), GFP_KERNEL);
2541 if (!hdev->vector_status) {
2542 pci_free_irq_vectors(pdev);
2546 for (i = 0; i < hdev->num_msi; i++)
2547 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2549 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2550 sizeof(int), GFP_KERNEL);
2551 if (!hdev->vector_irq) {
2552 pci_free_irq_vectors(pdev);
2559 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2561 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2562 duplex = HCLGE_MAC_FULL;
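/* Half duplex is only meaningful at 10M/100M; for any other speed the
 * duplex value is coerced to full duplex. For example, a half-duplex
 * request at 1G is silently turned into a full-duplex one before the
 * hardware is touched.
 */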
2567 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2570 struct hclge_config_mac_speed_dup_cmd *req;
2571 struct hclge_desc desc;
2574 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2579 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2582 case HCLGE_MAC_SPEED_10M:
2583 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2584 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
2586 case HCLGE_MAC_SPEED_100M:
2587 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2588 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
2590 case HCLGE_MAC_SPEED_1G:
2591 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2592 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
2594 case HCLGE_MAC_SPEED_10G:
2595 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2596 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
2598 case HCLGE_MAC_SPEED_25G:
2599 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2600 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
2602 case HCLGE_MAC_SPEED_40G:
2603 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2604 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
2606 case HCLGE_MAC_SPEED_50G:
2607 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2608 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
2610 case HCLGE_MAC_SPEED_100G:
2611 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2612 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
2614 case HCLGE_MAC_SPEED_200G:
2615 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2616 HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
2619 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2623 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2626 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2628 dev_err(&hdev->pdev->dev,
2629 "mac speed/duplex config cmd failed %d.\n", ret);
2636 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2638 struct hclge_mac *mac = &hdev->hw.mac;
2641 duplex = hclge_check_speed_dup(duplex, speed);
2642 if (!mac->support_autoneg && mac->speed == speed &&
2643 mac->duplex == duplex)
2646 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2650 hdev->hw.mac.speed = speed;
2651 hdev->hw.mac.duplex = duplex;
2656 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2659 struct hclge_vport *vport = hclge_get_vport(handle);
2660 struct hclge_dev *hdev = vport->back;
2662 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2665 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2667 struct hclge_config_auto_neg_cmd *req;
2668 struct hclge_desc desc;
2672 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2674 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2676 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2677 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2679 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2681 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2687 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2689 struct hclge_vport *vport = hclge_get_vport(handle);
2690 struct hclge_dev *hdev = vport->back;
2692 if (!hdev->hw.mac.support_autoneg) {
2694 dev_err(&hdev->pdev->dev,
2695 "autoneg is not supported by current port\n");
2702 return hclge_set_autoneg_en(hdev, enable);
2705 static int hclge_get_autoneg(struct hnae3_handle *handle)
2707 struct hclge_vport *vport = hclge_get_vport(handle);
2708 struct hclge_dev *hdev = vport->back;
2709 struct phy_device *phydev = hdev->hw.mac.phydev;
2712 return phydev->autoneg;
2714 return hdev->hw.mac.autoneg;
2717 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2719 struct hclge_vport *vport = hclge_get_vport(handle);
2720 struct hclge_dev *hdev = vport->back;
2723 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2725 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2728 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2731 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2733 struct hclge_vport *vport = hclge_get_vport(handle);
2734 struct hclge_dev *hdev = vport->back;
2736 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2737 return hclge_set_autoneg_en(hdev, !halt);
2742 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2744 struct hclge_config_fec_cmd *req;
2745 struct hclge_desc desc;
2748 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2750 req = (struct hclge_config_fec_cmd *)desc.data;
2751 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2752 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2753 if (fec_mode & BIT(HNAE3_FEC_RS))
2754 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2755 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2756 if (fec_mode & BIT(HNAE3_FEC_BASER))
2757 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2758 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2760 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2762 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2767 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2769 struct hclge_vport *vport = hclge_get_vport(handle);
2770 struct hclge_dev *hdev = vport->back;
2771 struct hclge_mac *mac = &hdev->hw.mac;
2774 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2775 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2779 ret = hclge_set_fec_hw(hdev, fec_mode);
2783 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2787 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2790 struct hclge_vport *vport = hclge_get_vport(handle);
2791 struct hclge_dev *hdev = vport->back;
2792 struct hclge_mac *mac = &hdev->hw.mac;
2795 *fec_ability = mac->fec_ability;
2797 *fec_mode = mac->fec_mode;
2800 static int hclge_mac_init(struct hclge_dev *hdev)
2802 struct hclge_mac *mac = &hdev->hw.mac;
2805 hdev->support_sfp_query = true;
2806 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2807 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2808 hdev->hw.mac.duplex);
2812 if (hdev->hw.mac.support_autoneg) {
2813 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2820 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2821 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2826 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2828 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2832 ret = hclge_set_default_loopback(hdev);
2836 ret = hclge_buffer_alloc(hdev);
2838 dev_err(&hdev->pdev->dev,
2839 "allocate buffer fail, ret=%d\n", ret);
2844 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2846 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2847 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2848 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2849 hclge_wq, &hdev->service_task, 0);
2852 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2854 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2855 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2856 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2857 hclge_wq, &hdev->service_task, 0);
2860 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2862 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2863 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2864 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2865 hclge_wq, &hdev->service_task, 0);
2868 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2870 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2871 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2872 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2873 hclge_wq, &hdev->service_task,
2877 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2879 struct hclge_link_status_cmd *req;
2880 struct hclge_desc desc;
2883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2884 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2886 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2891 req = (struct hclge_link_status_cmd *)desc.data;
2892 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2893 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2898 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2900 struct phy_device *phydev = hdev->hw.mac.phydev;
2902 *link_status = HCLGE_LINK_STATUS_DOWN;
2904 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2907 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2910 return hclge_get_mac_link_status(hdev, link_status);
2913 static void hclge_push_link_status(struct hclge_dev *hdev)
2915 struct hclge_vport *vport;
2919 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2920 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2922 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2923 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2926 ret = hclge_push_vf_link_status(vport);
2928 dev_err(&hdev->pdev->dev,
2929 "failed to push link status to vf%u, ret = %d\n",
2935 static void hclge_update_link_status(struct hclge_dev *hdev)
2937 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2938 struct hnae3_handle *handle = &hdev->vport[0].nic;
2939 struct hnae3_client *rclient = hdev->roce_client;
2940 struct hnae3_client *client = hdev->nic_client;
2947 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2950 ret = hclge_get_mac_phy_link(hdev, &state);
2952 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2956 if (state != hdev->hw.mac.link) {
2957 hdev->hw.mac.link = state;
2958 client->ops->link_status_change(handle, state);
2959 hclge_config_mac_tnl_int(hdev, state);
2960 if (rclient && rclient->ops->link_status_change)
2961 rclient->ops->link_status_change(rhandle, state);
2963 hclge_push_link_status(hdev);
2966 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2969 static void hclge_update_port_capability(struct hclge_dev *hdev,
2970 struct hclge_mac *mac)
2972 if (hnae3_dev_fec_supported(hdev))
2973 /* update fec ability by speed */
2974 hclge_convert_setting_fec(mac);
2976 /* firmware cannot identify the backplane type; the media type
2977 * read from the configuration is used to work it out
2979 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2980 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2981 mac->module_type = HNAE3_MODULE_TYPE_KR;
2982 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2983 mac->module_type = HNAE3_MODULE_TYPE_TP;
2985 if (mac->support_autoneg) {
2986 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2987 linkmode_copy(mac->advertising, mac->supported);
2989 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2991 linkmode_zero(mac->advertising);
2995 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2997 struct hclge_sfp_info_cmd *resp;
2998 struct hclge_desc desc;
3001 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3002 resp = (struct hclge_sfp_info_cmd *)desc.data;
3003 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3004 if (ret == -EOPNOTSUPP) {
3005 dev_warn(&hdev->pdev->dev,
3006 "IMP do not support get SFP speed %d\n", ret);
3009 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3013 *speed = le32_to_cpu(resp->speed);
3018 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3020 struct hclge_sfp_info_cmd *resp;
3021 struct hclge_desc desc;
3024 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3025 resp = (struct hclge_sfp_info_cmd *)desc.data;
3027 resp->query_type = QUERY_ACTIVE_SPEED;
3029 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3030 if (ret == -EOPNOTSUPP) {
3031 dev_warn(&hdev->pdev->dev,
3032 "IMP does not support get SFP info %d\n", ret);
3035 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3039 /* In some cases the MAC speed reported by the IMP may be 0; such a
3040 * value must not be written to mac->speed.
3042 if (!le32_to_cpu(resp->speed))
3045 mac->speed = le32_to_cpu(resp->speed);
3046 /* if resp->speed_ability is 0, it means this is an old firmware
3047 * version, so do not update these parameters
3049 if (resp->speed_ability) {
3050 mac->module_type = le32_to_cpu(resp->module_type);
3051 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3052 mac->autoneg = resp->autoneg;
3053 mac->support_autoneg = resp->autoneg_ability;
3054 mac->speed_type = QUERY_ACTIVE_SPEED;
3055 if (!resp->active_fec)
3058 mac->fec_mode = BIT(resp->active_fec);
3060 mac->speed_type = QUERY_SFP_SPEED;
3066 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3067 struct ethtool_link_ksettings *cmd)
3069 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3070 struct hclge_vport *vport = hclge_get_vport(handle);
3071 struct hclge_phy_link_ksetting_0_cmd *req0;
3072 struct hclge_phy_link_ksetting_1_cmd *req1;
3073 u32 supported, advertising, lp_advertising;
3074 struct hclge_dev *hdev = vport->back;
3077 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3079 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3080 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3083 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3085 dev_err(&hdev->pdev->dev,
3086 "failed to get phy link ksetting, ret = %d.\n", ret);
3090 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3091 cmd->base.autoneg = req0->autoneg;
3092 cmd->base.speed = le32_to_cpu(req0->speed);
3093 cmd->base.duplex = req0->duplex;
3094 cmd->base.port = req0->port;
3095 cmd->base.transceiver = req0->transceiver;
3096 cmd->base.phy_address = req0->phy_address;
3097 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3098 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3099 supported = le32_to_cpu(req0->supported);
3100 advertising = le32_to_cpu(req0->advertising);
3101 lp_advertising = le32_to_cpu(req0->lp_advertising);
3102 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3104 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3106 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3109 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3110 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3111 cmd->base.master_slave_state = req1->master_slave_state;
3117 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3118 const struct ethtool_link_ksettings *cmd)
3120 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3121 struct hclge_vport *vport = hclge_get_vport(handle);
3122 struct hclge_phy_link_ksetting_0_cmd *req0;
3123 struct hclge_phy_link_ksetting_1_cmd *req1;
3124 struct hclge_dev *hdev = vport->back;
3128 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3129 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3130 (cmd->base.duplex != DUPLEX_HALF &&
3131 cmd->base.duplex != DUPLEX_FULL)))
3134 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3136 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
3137 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3140 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3141 req0->autoneg = cmd->base.autoneg;
3142 req0->speed = cpu_to_le32(cmd->base.speed);
3143 req0->duplex = cmd->base.duplex;
3144 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3145 cmd->link_modes.advertising);
3146 req0->advertising = cpu_to_le32(advertising);
3147 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3149 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3150 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3152 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3154 dev_err(&hdev->pdev->dev,
3155 "failed to set phy link ksettings, ret = %d.\n", ret);
3159 hdev->hw.mac.autoneg = cmd->base.autoneg;
3160 hdev->hw.mac.speed = cmd->base.speed;
3161 hdev->hw.mac.duplex = cmd->base.duplex;
3162 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3167 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3169 struct ethtool_link_ksettings cmd;
3172 if (!hnae3_dev_phy_imp_supported(hdev))
3175 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3179 hdev->hw.mac.autoneg = cmd.base.autoneg;
3180 hdev->hw.mac.speed = cmd.base.speed;
3181 hdev->hw.mac.duplex = cmd.base.duplex;
3186 static int hclge_tp_port_init(struct hclge_dev *hdev)
3188 struct ethtool_link_ksettings cmd;
3190 if (!hnae3_dev_phy_imp_supported(hdev))
3193 cmd.base.autoneg = hdev->hw.mac.autoneg;
3194 cmd.base.speed = hdev->hw.mac.speed;
3195 cmd.base.duplex = hdev->hw.mac.duplex;
3196 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3198 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3201 static int hclge_update_port_info(struct hclge_dev *hdev)
3203 struct hclge_mac *mac = &hdev->hw.mac;
3204 int speed = HCLGE_MAC_SPEED_UNKNOWN;
3207 /* get the port info from SFP cmd if not copper port */
3208 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3209 return hclge_update_tp_port_info(hdev);
3211 /* if the IMP does not support querying SFP/qSFP info, return directly */
3212 if (!hdev->support_sfp_query)
3215 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
3216 ret = hclge_get_sfp_info(hdev, mac);
3218 ret = hclge_get_sfp_speed(hdev, &speed);
3220 if (ret == -EOPNOTSUPP) {
3221 hdev->support_sfp_query = false;
3227 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3228 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3229 hclge_update_port_capability(hdev, mac);
3232 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3235 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3236 return 0; /* do nothing if no SFP */
3238 /* must configure full duplex for SFP */
3239 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3243 static int hclge_get_status(struct hnae3_handle *handle)
3245 struct hclge_vport *vport = hclge_get_vport(handle);
3246 struct hclge_dev *hdev = vport->back;
3248 hclge_update_link_status(hdev);
3250 return hdev->hw.mac.link;
3253 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3255 if (!pci_num_vf(hdev->pdev)) {
3256 dev_err(&hdev->pdev->dev,
3257 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3261 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3262 dev_err(&hdev->pdev->dev,
3263 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3264 vf, pci_num_vf(hdev->pdev));
3268 /* VFs start from index 1 in the vport array */
3269 vf += HCLGE_VF_VPORT_START_NUM;
3270 return &hdev->vport[vf];
3273 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3274 struct ifla_vf_info *ivf)
3276 struct hclge_vport *vport = hclge_get_vport(handle);
3277 struct hclge_dev *hdev = vport->back;
3279 vport = hclge_get_vf_vport(hdev, vf);
3284 ivf->linkstate = vport->vf_info.link_state;
3285 ivf->spoofchk = vport->vf_info.spoofchk;
3286 ivf->trusted = vport->vf_info.trusted;
3287 ivf->min_tx_rate = 0;
3288 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3289 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3290 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3291 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3292 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3297 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3300 struct hclge_vport *vport = hclge_get_vport(handle);
3301 struct hclge_dev *hdev = vport->back;
3305 vport = hclge_get_vf_vport(hdev, vf);
3309 link_state_old = vport->vf_info.link_state;
3310 vport->vf_info.link_state = link_state;
3312 ret = hclge_push_vf_link_status(vport);
3314 vport->vf_info.link_state = link_state_old;
3315 dev_err(&hdev->pdev->dev,
3316 "failed to push vf%d link status, ret = %d\n", vf, ret);
3322 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3324 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3326 /* fetch the events from their corresponding regs */
3327 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3328 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3329 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3330 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3332 /* Assumption: if reset and mailbox events happen to be reported
3333 * together, only the reset event is processed in this pass and the
3334 * mailbox events are deferred. Since the RX CMDQ event is not
3335 * cleared this time, the hardware will raise another interrupt
3336 * just for the mailbox.
3338 * check for vector0 reset event sources
3340 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3341 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3342 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3343 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3344 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3345 hdev->rst_stats.imp_rst_cnt++;
3346 return HCLGE_VECTOR0_EVENT_RST;
3349 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3350 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3351 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3352 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3353 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3354 hdev->rst_stats.global_rst_cnt++;
3355 return HCLGE_VECTOR0_EVENT_RST;
3358 /* check for vector0 msix event and hardware error event source */
3359 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3360 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3361 return HCLGE_VECTOR0_EVENT_ERR;
3363 /* check for vector0 ptp event source */
3364 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3365 *clearval = msix_src_reg;
3366 return HCLGE_VECTOR0_EVENT_PTP;
3369 /* check for vector0 mailbox(=CMDQ RX) event source */
3370 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3371 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3372 *clearval = cmdq_src_reg;
3373 return HCLGE_VECTOR0_EVENT_MBX;
3376 /* print other vector0 event source */
3377 dev_info(&hdev->pdev->dev,
3378 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3379 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3381 return HCLGE_VECTOR0_EVENT_OTHER;
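/* Event precedence as implemented above: IMP reset and global reset are
 * reported first, then MSI-X/RAS hardware errors, then PTP, then the
 * mailbox (CMDQ RX) event; anything else falls through as "other" and is
 * only logged.
 */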
3384 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3387 switch (event_type) {
3388 case HCLGE_VECTOR0_EVENT_PTP:
3389 case HCLGE_VECTOR0_EVENT_RST:
3390 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3392 case HCLGE_VECTOR0_EVENT_MBX:
3393 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3400 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3402 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3403 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3404 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3405 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3406 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3409 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3411 writel(enable ? 1 : 0, vector->addr);
3414 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3416 struct hclge_dev *hdev = data;
3417 unsigned long flags;
3421 hclge_enable_vector(&hdev->misc_vector, false);
3422 event_cause = hclge_check_event_cause(hdev, &clearval);
3424 /* vector 0 interrupt is shared with reset and mailbox source events. */
3425 switch (event_cause) {
3426 case HCLGE_VECTOR0_EVENT_ERR:
3427 hclge_errhand_task_schedule(hdev);
3429 case HCLGE_VECTOR0_EVENT_RST:
3430 hclge_reset_task_schedule(hdev);
3432 case HCLGE_VECTOR0_EVENT_PTP:
3433 spin_lock_irqsave(&hdev->ptp->lock, flags);
3434 hclge_ptp_clean_tx_hwts(hdev);
3435 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3437 case HCLGE_VECTOR0_EVENT_MBX:
3438 /* If we are here then either:
3439 * 1. we are not handling any mbx task and none is scheduled, or
3442 * 2. we are handling a mbx task but no further one is scheduled.
3444 * In both cases, schedule the mbx task, as this interrupt indicates
3445 * that more mbx messages have been reported.
3447 hclge_mbx_task_schedule(hdev);
3450 dev_warn(&hdev->pdev->dev,
3451 "received unknown or unhandled event of vector0\n");
3455 hclge_clear_event_cause(hdev, event_cause, clearval);
3457 /* Enable interrupt if it is not caused by reset event or error event */
3458 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3459 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3460 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3461 hclge_enable_vector(&hdev->misc_vector, true);
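/* The misc vector is re-enabled here only for PTP, mailbox and "other"
 * causes; for reset and error events it stays masked and is re-enabled
 * later, once the cause has been handled, by hclge_clear_reset_cause()
 * and hclge_handle_err_reset_request() respectively.
 */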
3466 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3468 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3469 dev_warn(&hdev->pdev->dev,
3470 "vector(vector_id %d) has been freed.\n", vector_id);
3474 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3475 hdev->num_msi_left += 1;
3476 hdev->num_msi_used -= 1;
3479 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3481 struct hclge_misc_vector *vector = &hdev->misc_vector;
3483 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3485 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3486 hdev->vector_status[0] = 0;
3488 hdev->num_msi_left -= 1;
3489 hdev->num_msi_used += 1;
3492 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3493 const cpumask_t *mask)
3495 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3498 cpumask_copy(&hdev->affinity_mask, mask);
3501 static void hclge_irq_affinity_release(struct kref *ref)
3505 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3507 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3508 &hdev->affinity_mask);
3510 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3511 hdev->affinity_notify.release = hclge_irq_affinity_release;
3512 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3513 &hdev->affinity_notify);
3516 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3518 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3519 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3522 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3526 hclge_get_misc_vector(hdev);
3528 /* this would be explicitly freed in the end */
3529 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3530 HCLGE_NAME, pci_name(hdev->pdev));
3531 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3532 0, hdev->misc_vector.name, hdev);
3534 hclge_free_vector(hdev, 0);
3535 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3536 hdev->misc_vector.vector_irq);
3542 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3544 free_irq(hdev->misc_vector.vector_irq, hdev);
3545 hclge_free_vector(hdev, 0);
3548 int hclge_notify_client(struct hclge_dev *hdev,
3549 enum hnae3_reset_notify_type type)
3551 struct hnae3_handle *handle = &hdev->vport[0].nic;
3552 struct hnae3_client *client = hdev->nic_client;
3555 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3558 if (!client->ops->reset_notify)
3561 ret = client->ops->reset_notify(handle, type);
3563 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3569 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3570 enum hnae3_reset_notify_type type)
3572 struct hnae3_handle *handle = &hdev->vport[0].roce;
3573 struct hnae3_client *client = hdev->roce_client;
3576 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3579 if (!client->ops->reset_notify)
3582 ret = client->ops->reset_notify(handle, type);
3584 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3590 static int hclge_reset_wait(struct hclge_dev *hdev)
3592 #define HCLGE_RESET_WATI_MS 100
3593 #define HCLGE_RESET_WAIT_CNT 350
3595 u32 val, reg, reg_bit;
3598 switch (hdev->reset_type) {
3599 case HNAE3_IMP_RESET:
3600 reg = HCLGE_GLOBAL_RESET_REG;
3601 reg_bit = HCLGE_IMP_RESET_BIT;
3603 case HNAE3_GLOBAL_RESET:
3604 reg = HCLGE_GLOBAL_RESET_REG;
3605 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3607 case HNAE3_FUNC_RESET:
3608 reg = HCLGE_FUN_RST_ING;
3609 reg_bit = HCLGE_FUN_RST_ING_B;
3612 dev_err(&hdev->pdev->dev,
3613 "Wait for unsupported reset type: %d\n",
3618 val = hclge_read_dev(&hdev->hw, reg);
3619 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3620 msleep(HCLGE_RESET_WATI_MS);
3621 val = hclge_read_dev(&hdev->hw, reg);
3625 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3626 dev_warn(&hdev->pdev->dev,
3627 "Wait for reset timeout: %d\n", hdev->reset_type);
3634 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3636 struct hclge_vf_rst_cmd *req;
3637 struct hclge_desc desc;
3639 req = (struct hclge_vf_rst_cmd *)desc.data;
3640 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3641 req->dest_vfid = func_id;
3646 return hclge_cmd_send(&hdev->hw, &desc, 1);
3649 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3653 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3654 struct hclge_vport *vport = &hdev->vport[i];
3657 /* Send cmd to set/clear VF's FUNC_RST_ING */
3658 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3660 dev_err(&hdev->pdev->dev,
3661 "set vf(%u) rst failed %d!\n",
3662 vport->vport_id, ret);
3666 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3669 /* Inform VF to process the reset.
3670 * hclge_inform_reset_assert_to_vf may fail if VF
3671 * driver is not loaded.
3673 ret = hclge_inform_reset_assert_to_vf(vport);
3675 dev_warn(&hdev->pdev->dev,
3676 "inform reset to vf(%u) failed %d!\n",
3677 vport->vport_id, ret);
3683 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3685 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3686 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3687 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3690 hclge_mbx_handler(hdev);
3692 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3695 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3697 struct hclge_pf_rst_sync_cmd *req;
3698 struct hclge_desc desc;
3702 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3706 /* the VF needs to bring its netdev down via mailbox during PF or FLR reset */
3707 hclge_mailbox_service_task(hdev);
3709 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3710 /* for compatibility with old firmware, wait
3711 * 100 ms for the VF to stop IO
3713 if (ret == -EOPNOTSUPP) {
3714 msleep(HCLGE_RESET_SYNC_TIME);
3717 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3720 } else if (req->all_vf_ready) {
3723 msleep(HCLGE_PF_RESET_SYNC_TIME);
3724 hclge_cmd_reuse_desc(&desc, true);
3725 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3727 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
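/* The loop above keeps re-sending the readiness query (via
 * hclge_cmd_reuse_desc) every HCLGE_PF_RESET_SYNC_TIME milliseconds until
 * all VFs report ready, the firmware turns out not to support the query
 * (-EOPNOTSUPP, in which case a single HCLGE_RESET_SYNC_TIME wait is used
 * instead), a command error occurs, or HCLGE_PF_RESET_SYNC_CNT attempts
 * have been made.
 */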
3730 void hclge_report_hw_error(struct hclge_dev *hdev,
3731 enum hnae3_hw_error_type type)
3733 struct hnae3_client *client = hdev->nic_client;
3735 if (!client || !client->ops->process_hw_error ||
3736 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3739 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3742 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3746 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3747 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3748 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3749 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3750 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3753 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3754 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3755 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3756 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3760 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3762 struct hclge_desc desc;
3763 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3766 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3767 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3768 req->fun_reset_vfid = func_id;
3770 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3772 dev_err(&hdev->pdev->dev,
3773 "send function reset cmd fail, status =%d\n", ret);
3778 static void hclge_do_reset(struct hclge_dev *hdev)
3780 struct hnae3_handle *handle = &hdev->vport[0].nic;
3781 struct pci_dev *pdev = hdev->pdev;
3784 if (hclge_get_hw_reset_stat(handle)) {
3785 dev_info(&pdev->dev, "hardware reset not finish\n");
3786 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3787 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3788 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3792 switch (hdev->reset_type) {
3793 case HNAE3_IMP_RESET:
3794 dev_info(&pdev->dev, "IMP reset requested\n");
3795 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3796 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3797 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3799 case HNAE3_GLOBAL_RESET:
3800 dev_info(&pdev->dev, "global reset requested\n");
3801 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3802 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3803 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3805 case HNAE3_FUNC_RESET:
3806 dev_info(&pdev->dev, "PF reset requested\n");
3807 /* schedule again to check later */
3808 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3809 hclge_reset_task_schedule(hdev);
3812 dev_warn(&pdev->dev,
3813 "unsupported reset type: %d\n", hdev->reset_type);
3818 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3819 unsigned long *addr)
3821 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3822 struct hclge_dev *hdev = ae_dev->priv;
3824 /* return the highest priority reset level amongst all */
3825 if (test_bit(HNAE3_IMP_RESET, addr)) {
3826 rst_level = HNAE3_IMP_RESET;
3827 clear_bit(HNAE3_IMP_RESET, addr);
3828 clear_bit(HNAE3_GLOBAL_RESET, addr);
3829 clear_bit(HNAE3_FUNC_RESET, addr);
3830 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3831 rst_level = HNAE3_GLOBAL_RESET;
3832 clear_bit(HNAE3_GLOBAL_RESET, addr);
3833 clear_bit(HNAE3_FUNC_RESET, addr);
3834 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3835 rst_level = HNAE3_FUNC_RESET;
3836 clear_bit(HNAE3_FUNC_RESET, addr);
3837 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3838 rst_level = HNAE3_FLR_RESET;
3839 clear_bit(HNAE3_FLR_RESET, addr);
3842 if (hdev->reset_type != HNAE3_NONE_RESET &&
3843 rst_level < hdev->reset_type)
3844 return HNAE3_NONE_RESET;
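/* Reset levels are consumed in strict priority order: IMP reset beats
 * global reset, which beats function reset, which beats FLR. Taking a
 * higher level also clears the lower-level pending bits, since the bigger
 * reset subsumes them, and a request at a lower level than the reset
 * currently being handled is ignored (HNAE3_NONE_RESET is returned).
 */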
3849 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3853 switch (hdev->reset_type) {
3854 case HNAE3_IMP_RESET:
3855 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3857 case HNAE3_GLOBAL_RESET:
3858 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3867 /* For revision 0x20, the reset interrupt source
3868 * can only be cleared after the hardware reset is done
3870 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3871 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3874 hclge_enable_vector(&hdev->misc_vector, true);
3877 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3881 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3883 reg_val |= HCLGE_NIC_SW_RST_RDY;
3885 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3887 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3890 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3894 ret = hclge_set_all_vf_rst(hdev, true);
3898 hclge_func_reset_sync_vf(hdev);
3903 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3908 switch (hdev->reset_type) {
3909 case HNAE3_FUNC_RESET:
3910 ret = hclge_func_reset_notify_vf(hdev);
3914 ret = hclge_func_reset_cmd(hdev, 0);
3916 dev_err(&hdev->pdev->dev,
3917 "asserting function reset fail %d!\n", ret);
3921 /* After performing a PF reset, it is not necessary to do any
3922 * mailbox handling or send any command to the firmware, because
3923 * mailbox handling and firmware commands are only valid
3924 * after hclge_cmd_init is called.
3926 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3927 hdev->rst_stats.pf_rst_cnt++;
3929 case HNAE3_FLR_RESET:
3930 ret = hclge_func_reset_notify_vf(hdev);
3934 case HNAE3_IMP_RESET:
3935 hclge_handle_imp_error(hdev);
3936 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3937 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3938 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3944 /* inform hardware that preparatory work is done */
3945 msleep(HCLGE_RESET_SYNC_TIME);
3946 hclge_reset_handshake(hdev, true);
3947 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3952 static void hclge_show_rst_info(struct hclge_dev *hdev)
3956 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
3960 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
3962 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
3967 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3969 #define MAX_RESET_FAIL_CNT 5
3971 if (hdev->reset_pending) {
3972 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3973 hdev->reset_pending);
3975 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3976 HCLGE_RESET_INT_M) {
3977 dev_info(&hdev->pdev->dev,
3978 "reset failed because new reset interrupt\n");
3979 hclge_clear_reset_cause(hdev);
3981 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3982 hdev->rst_stats.reset_fail_cnt++;
3983 set_bit(hdev->reset_type, &hdev->reset_pending);
3984 dev_info(&hdev->pdev->dev,
3985 "re-schedule reset task(%u)\n",
3986 hdev->rst_stats.reset_fail_cnt);
3990 hclge_clear_reset_cause(hdev);
3992 /* recover the handshake status when the reset fails */
3993 hclge_reset_handshake(hdev, true);
3995 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3997 hclge_show_rst_info(hdev);
3999 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4004 static void hclge_update_reset_level(struct hclge_dev *hdev)
4006 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4007 enum hnae3_reset_type reset_level;
4009 /* reset requests are not set while a reset is in progress, so clear
4010 * any pending reset request to avoid an unnecessary reset
4011 * caused by the same reason.
4013 hclge_get_reset_level(ae_dev, &hdev->reset_request);
4015 /* if default_reset_request holds a higher level reset request,
4016 * it should be handled as soon as possible, since some errors
4017 * need this kind of reset to be fixed.
4019 reset_level = hclge_get_reset_level(ae_dev,
4020 &hdev->default_reset_request);
4021 if (reset_level != HNAE3_NONE_RESET)
4022 set_bit(reset_level, &hdev->reset_request);
4025 static int hclge_set_rst_done(struct hclge_dev *hdev)
4027 struct hclge_pf_rst_done_cmd *req;
4028 struct hclge_desc desc;
4031 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4032 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4033 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4035 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4036 /* To be compatible with the old firmware, which does not support
4037 * command HCLGE_OPC_PF_RST_DONE, just print a warning and return success.
4040 if (ret == -EOPNOTSUPP) {
4041 dev_warn(&hdev->pdev->dev,
4042 "current firmware does not support command(0x%x)!\n",
4043 HCLGE_OPC_PF_RST_DONE);
4046 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4053 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4057 switch (hdev->reset_type) {
4058 case HNAE3_FUNC_RESET:
4059 case HNAE3_FLR_RESET:
4060 ret = hclge_set_all_vf_rst(hdev, false);
4062 case HNAE3_GLOBAL_RESET:
4063 case HNAE3_IMP_RESET:
4064 ret = hclge_set_rst_done(hdev);
4070 /* clear the handshake status after re-initialization is done */
4071 hclge_reset_handshake(hdev, false);
4076 static int hclge_reset_stack(struct hclge_dev *hdev)
4080 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4084 ret = hclge_reset_ae_dev(hdev->ae_dev);
4088 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4091 static int hclge_reset_prepare(struct hclge_dev *hdev)
4095 hdev->rst_stats.reset_cnt++;
4096 /* perform reset of the stack & ae device for a client */
4097 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4102 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4107 return hclge_reset_prepare_wait(hdev);
4110 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4114 hdev->rst_stats.hw_reset_done_cnt++;
4116 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4121 ret = hclge_reset_stack(hdev);
4126 hclge_clear_reset_cause(hdev);
4128 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4129 /* ignore the RoCE notify error once the reset has already failed HCLGE_RESET_MAX_FAIL_CNT - 1 times */
4133 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4136 ret = hclge_reset_prepare_up(hdev);
4141 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4146 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4150 hdev->last_reset_time = jiffies;
4151 hdev->rst_stats.reset_fail_cnt = 0;
4152 hdev->rst_stats.reset_done_cnt++;
4153 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4155 hclge_update_reset_level(hdev);
4160 static void hclge_reset(struct hclge_dev *hdev)
4162 if (hclge_reset_prepare(hdev))
4165 if (hclge_reset_wait(hdev))
4168 if (hclge_reset_rebuild(hdev))
4174 if (hclge_reset_err_handle(hdev))
4175 hclge_reset_task_schedule(hdev);
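/* hclge_reset() is the top-level flow: prepare (notify the clients and
 * assert the reset), wait for the hardware to finish, then rebuild the
 * stack; if any step fails, hclge_reset_err_handle() decides whether the
 * reset task should be re-scheduled for another attempt.
 */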
4178 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4180 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4181 struct hclge_dev *hdev = ae_dev->priv;
4183 /* We may end up here for two reasons:
4184 * 1. a recoverable error was conveyed through APEI and the only way to
4185 * restore normal operation is to reset, or
4186 * 2. the stack requested a new reset because of a timeout.
4188 * Check whether this is a new reset request, rather than the watchdog
4189 * hitting us again because the last reset attempt did not succeed. We
4190 * know it is new if the last reset request was not issued very recently
4191 * (watchdog timer = 5*HZ; check after a sufficiently long time, 4*5*HZ).
4192 * For a new request, the "reset level" is reset to a PF reset.
4193 * If it is a repeat of the most recent request, the reset request is
4194 * throttled and will not be allowed again before 3*HZ has passed.
4198 if (time_before(jiffies, (hdev->last_reset_time +
4199 HCLGE_RESET_INTERVAL))) {
4200 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4204 if (hdev->default_reset_request) {
4206 hclge_get_reset_level(ae_dev,
4207 &hdev->default_reset_request);
4208 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4209 hdev->reset_level = HNAE3_FUNC_RESET;
4212 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4215 /* request reset & schedule reset task */
4216 set_bit(hdev->reset_level, &hdev->reset_request);
4217 hclge_reset_task_schedule(hdev);
4219 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4220 hdev->reset_level++;
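/* Beyond the throttling described above, each accepted request bumps
 * reset_level by one step for the next attempt, but never past
 * HNAE3_GLOBAL_RESET, and a request arriving within HCLGE_RESET_INTERVAL
 * of the previous reset only re-arms reset_timer instead of scheduling a
 * new reset.
 */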
4223 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4224 enum hnae3_reset_type rst_type)
4226 struct hclge_dev *hdev = ae_dev->priv;
4228 set_bit(rst_type, &hdev->default_reset_request);
4231 static void hclge_reset_timer(struct timer_list *t)
4233 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4235 /* if default_reset_request has no value, it means that this reset
4236 * request has already been handled, so just return here
4238 if (!hdev->default_reset_request)
4241 dev_info(&hdev->pdev->dev,
4242 "triggering reset in reset timer\n");
4243 hclge_reset_event(hdev->pdev, NULL);
4246 static void hclge_reset_subtask(struct hclge_dev *hdev)
4248 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4250 /* check if there is any ongoing reset in the hardware. This status can
4251 * be checked from reset_pending. If there is one, we need to wait for
4252 * the hardware to complete the reset:
4253 * a. if we can determine within a reasonable time that the hardware
4254 * has fully reset, proceed with the driver and client reset,
4256 * b. otherwise, come back later to check this status and re-schedule.
4259 hdev->last_reset_time = jiffies;
4260 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4261 if (hdev->reset_type != HNAE3_NONE_RESET)
4264 /* check if we got any *new* reset requests to be honored */
4265 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4266 if (hdev->reset_type != HNAE3_NONE_RESET)
4267 hclge_do_reset(hdev);
4269 hdev->reset_type = HNAE3_NONE_RESET;
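/* Convert the reset level requested by error handling (hw_err_reset_req)
 * into a default reset request, trigger the reset event, and re-enable the
 * misc interrupt vector once error handling is complete.
 */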
4272 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4274 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4275 enum hnae3_reset_type reset_type;
4277 if (ae_dev->hw_err_reset_req) {
4278 reset_type = hclge_get_reset_level(ae_dev,
4279 &ae_dev->hw_err_reset_req);
4280 hclge_set_def_reset_request(ae_dev, reset_type);
4283 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4284 ae_dev->ops->reset_event(hdev->pdev, NULL);
4286 /* enable the interrupt after error handling is complete */
4287 hclge_enable_vector(&hdev->misc_vector, true);
4290 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4292 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4294 ae_dev->hw_err_reset_req = 0;
4296 if (hclge_find_error_source(hdev)) {
4297 hclge_handle_error_info_log(ae_dev);
4298 hclge_handle_mac_tnl(hdev);
4301 hclge_handle_err_reset_request(hdev);
4304 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4306 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4307 struct device *dev = &hdev->pdev->dev;
4310 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4311 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4312 if (hclge_handle_hw_msix_error
4313 (hdev, &hdev->default_reset_request))
4314 dev_info(dev, "received msix interrupt 0x%x\n",
4318 hclge_handle_hw_ras_error(ae_dev);
4320 hclge_handle_err_reset_request(hdev);
4323 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4325 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4328 if (hnae3_dev_ras_imp_supported(hdev))
4329 hclge_handle_err_recovery(hdev);
4331 hclge_misc_err_recovery(hdev);
4334 static void hclge_reset_service_task(struct hclge_dev *hdev)
4336 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4339 down(&hdev->reset_sem);
4340 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4342 hclge_reset_subtask(hdev);
4344 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4345 up(&hdev->reset_sem);
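/* A VF is treated as dead when its last_active_jiffies has not been
 * refreshed for 8 * HZ; its ALIVE state is cleared and its MPS falls back
 * to the default value.
 */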
4348 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4352 /* start from vport 1 because the PF (vport 0) is always alive */
4353 for (i = 1; i < hdev->num_alloc_vport; i++) {
4354 struct hclge_vport *vport = &hdev->vport[i];
4356 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4357 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4359 /* If the VF is not alive, set its MPS back to the default value */
4360 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4361 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4365 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4367 unsigned long delta = round_jiffies_relative(HZ);
4369 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4372 /* Always handle the link update to make sure the link state is
4373 * updated when it is triggered by mbx.
4375 hclge_update_link_status(hdev);
4376 hclge_sync_mac_table(hdev);
4377 hclge_sync_promisc_mode(hdev);
4378 hclge_sync_fd_table(hdev);
4380 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4381 delta = jiffies - hdev->last_serv_processed;
4383 if (delta < round_jiffies_relative(HZ)) {
4384 delta = round_jiffies_relative(HZ) - delta;
4389 hdev->serv_processed_cnt++;
4390 hclge_update_vport_alive(hdev);
4392 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4393 hdev->last_serv_processed = jiffies;
4397 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4398 hclge_update_stats_for_all(hdev);
4400 hclge_update_port_info(hdev);
4401 hclge_sync_vlan_filter(hdev);
4403 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4404 hclge_rfs_filter_expire(hdev);
4406 hdev->last_serv_processed = jiffies;
4409 hclge_task_schedule(hdev, delta);
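/* Clean up the pending PTP TX hardware timestamp if the irq handler has not
 * done so within one second of tx_start; the ptp lock serializes this path
 * with the irq handler.
 */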
4412 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4414 unsigned long flags;
4416 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4417 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4418 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4421 /* to prevent racing with the irq handler */
4422 spin_lock_irqsave(&hdev->ptp->lock, flags);
4424 /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4425 * handler may handle it just before spin_lock_irqsave().
4427 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4428 hclge_ptp_clean_tx_hwts(hdev);
4430 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
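/* The delayed work handler for the hclge workqueue: error handling, reset
 * and mailbox events are serviced both before and after the periodic task,
 * since the periodic task may delay them by re-scheduling itself.
 */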
4433 static void hclge_service_task(struct work_struct *work)
4435 struct hclge_dev *hdev =
4436 container_of(work, struct hclge_dev, service_task.work);
4438 hclge_errhand_service_task(hdev);
4439 hclge_reset_service_task(hdev);
4440 hclge_ptp_service_task(hdev);
4441 hclge_mailbox_service_task(hdev);
4442 hclge_periodic_service_task(hdev);
4444 /* Handle error recovery, reset and mbx again in case the periodic task
4445 * delays the handling by calling hclge_task_schedule() in
4446 * hclge_periodic_service_task().
4448 hclge_errhand_service_task(hdev);
4449 hclge_reset_service_task(hdev);
4450 hclge_mailbox_service_task(hdev);
4453 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4455 /* VF handle has no client */
4456 if (!handle->client)
4457 return container_of(handle, struct hclge_vport, nic);
4458 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4459 return container_of(handle, struct hclge_vport, roce);
4461 return container_of(handle, struct hclge_vport, nic);
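/* Fill in the irq number and the io address of the interrupt control
 * register for the given vector index. The first 64 vectors live in the
 * base register region; higher ones are addressed through the extended
 * register region.
 */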
4464 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4465 struct hnae3_vector_info *vector_info)
4467 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4469 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4471 /* need an extended offset to configure vectors >= 64 */
4472 if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4473 vector_info->io_addr = hdev->hw.io_base +
4474 HCLGE_VECTOR_REG_BASE +
4475 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4477 vector_info->io_addr = hdev->hw.io_base +
4478 HCLGE_VECTOR_EXT_REG_BASE +
4479 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4480 HCLGE_VECTOR_REG_OFFSET_H +
4481 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4482 HCLGE_VECTOR_REG_OFFSET;
4484 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4485 hdev->vector_irq[idx] = vector_info->vector;
4488 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4489 struct hnae3_vector_info *vector_info)
4491 struct hclge_vport *vport = hclge_get_vport(handle);
4492 struct hnae3_vector_info *vector = vector_info;
4493 struct hclge_dev *hdev = vport->back;
4498 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4499 vector_num = min(hdev->num_msi_left, vector_num);
4501 for (j = 0; j < vector_num; j++) {
4502 while (++i < hdev->num_nic_msi) {
4503 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4504 hclge_get_vector_info(hdev, i, vector);
4512 hdev->num_msi_left -= alloc;
4513 hdev->num_msi_used += alloc;
4518 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4522 for (i = 0; i < hdev->num_msi; i++)
4523 if (vector == hdev->vector_irq[i])
4529 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4531 struct hclge_vport *vport = hclge_get_vport(handle);
4532 struct hclge_dev *hdev = vport->back;
4535 vector_id = hclge_get_vector_index(hdev, vector);
4536 if (vector_id < 0) {
4537 dev_err(&hdev->pdev->dev,
4538 "Get vector index fail. vector = %d\n", vector);
4542 hclge_free_vector(hdev, vector_id);
4547 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4549 return HCLGE_RSS_KEY_SIZE;
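/* Program the RSS hash algorithm and hash key. The key is longer than one
 * descriptor can carry, so it is written in chunks of HCLGE_RSS_HASH_KEY_NUM
 * bytes, with the chunk index encoded in hash_config.
 */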
4552 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4553 const u8 hfunc, const u8 *key)
4555 struct hclge_rss_config_cmd *req;
4556 unsigned int key_offset = 0;
4557 struct hclge_desc desc;
4562 key_counts = HCLGE_RSS_KEY_SIZE;
4563 req = (struct hclge_rss_config_cmd *)desc.data;
4565 while (key_counts) {
4566 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4569 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4570 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4572 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4573 memcpy(req->hash_key,
4574 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4576 key_counts -= key_size;
4578 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4580 dev_err(&hdev->pdev->dev,
4581 "Configure RSS config fail, status = %d\n",
4589 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
4591 struct hclge_rss_indirection_table_cmd *req;
4592 struct hclge_desc desc;
4593 int rss_cfg_tbl_num;
4601 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4602 rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
4603 HCLGE_RSS_CFG_TBL_SIZE;
4605 for (i = 0; i < rss_cfg_tbl_num; i++) {
4606 hclge_cmd_setup_basic_desc
4607 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4609 req->start_table_index =
4610 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4611 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4612 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
4613 qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4614 req->rss_qid_l[j] = qid & 0xff;
4616 j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
4617 rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
4618 (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
4619 req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
4621 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4623 dev_err(&hdev->pdev->dev,
4624 "Configure rss indir table fail,status = %d\n",
4632 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4633 u16 *tc_size, u16 *tc_offset)
4635 struct hclge_rss_tc_mode_cmd *req;
4636 struct hclge_desc desc;
4640 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4641 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4643 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4646 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4647 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4648 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4649 hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
4650 tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
4651 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4652 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4654 req->rss_tc_mode[i] = cpu_to_le16(mode);
4657 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4659 dev_err(&hdev->pdev->dev,
4660 "Configure rss tc mode fail, status = %d\n", ret);
4665 static void hclge_get_rss_type(struct hclge_vport *vport)
4667 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4668 vport->rss_tuple_sets.ipv4_udp_en ||
4669 vport->rss_tuple_sets.ipv4_sctp_en ||
4670 vport->rss_tuple_sets.ipv6_tcp_en ||
4671 vport->rss_tuple_sets.ipv6_udp_en ||
4672 vport->rss_tuple_sets.ipv6_sctp_en)
4673 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4674 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4675 vport->rss_tuple_sets.ipv6_fragment_en)
4676 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4678 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4681 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4683 struct hclge_rss_input_tuple_cmd *req;
4684 struct hclge_desc desc;
4687 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4689 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4691 /* Get the tuple cfg from pf */
4692 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4693 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4694 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4695 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4696 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4697 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4698 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4699 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4700 hclge_get_rss_type(&hdev->vport[0]);
4701 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4703 dev_err(&hdev->pdev->dev,
4704 "Configure rss input fail, status = %d\n", ret);
4708 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4711 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4712 struct hclge_vport *vport = hclge_get_vport(handle);
4715 /* Get hash algorithm */
4717 switch (vport->rss_algo) {
4718 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4719 *hfunc = ETH_RSS_HASH_TOP;
4721 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4722 *hfunc = ETH_RSS_HASH_XOR;
4725 *hfunc = ETH_RSS_HASH_UNKNOWN;
4730 /* Get the RSS Key required by the user */
4732 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4734 /* Get indirect table */
4736 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4737 indir[i] = vport->rss_indirection_tbl[i];
4742 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4743 const u8 *key, const u8 hfunc)
4745 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4746 struct hclge_vport *vport = hclge_get_vport(handle);
4747 struct hclge_dev *hdev = vport->back;
4751 /* Set the RSS Hash Key if specified by the user */
4754 case ETH_RSS_HASH_TOP:
4755 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4757 case ETH_RSS_HASH_XOR:
4758 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4760 case ETH_RSS_HASH_NO_CHANGE:
4761 hash_algo = vport->rss_algo;
4767 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4771 /* Update the shadow RSS key and algorithm with the user specified values */
4772 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4773 vport->rss_algo = hash_algo;
4776 /* Update the shadow RSS table with user specified qids */
4777 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4778 vport->rss_indirection_tbl[i] = indir[i];
4780 /* Update the hardware */
4781 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
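/* Translate the ethtool RXH_* flags into the hardware tuple bits: L4 source
 * and destination port, IP source and destination address, plus the
 * verification tag for SCTP flows.
 */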
4784 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4786 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4788 if (nfc->data & RXH_L4_B_2_3)
4789 hash_sets |= HCLGE_D_PORT_BIT;
4791 hash_sets &= ~HCLGE_D_PORT_BIT;
4793 if (nfc->data & RXH_IP_SRC)
4794 hash_sets |= HCLGE_S_IP_BIT;
4796 hash_sets &= ~HCLGE_S_IP_BIT;
4798 if (nfc->data & RXH_IP_DST)
4799 hash_sets |= HCLGE_D_IP_BIT;
4801 hash_sets &= ~HCLGE_D_IP_BIT;
4803 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4804 hash_sets |= HCLGE_V_TAG_BIT;
4809 static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
4810 struct ethtool_rxnfc *nfc,
4811 struct hclge_rss_input_tuple_cmd *req)
4813 struct hclge_dev *hdev = vport->back;
4816 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4817 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4818 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4819 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4820 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4821 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4822 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4823 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4825 tuple_sets = hclge_get_rss_hash_bits(nfc);
4826 switch (nfc->flow_type) {
4828 req->ipv4_tcp_en = tuple_sets;
4831 req->ipv6_tcp_en = tuple_sets;
4834 req->ipv4_udp_en = tuple_sets;
4837 req->ipv6_udp_en = tuple_sets;
4840 req->ipv4_sctp_en = tuple_sets;
4843 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
4844 (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
4847 req->ipv6_sctp_en = tuple_sets;
4850 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4853 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4862 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4863 struct ethtool_rxnfc *nfc)
4865 struct hclge_vport *vport = hclge_get_vport(handle);
4866 struct hclge_dev *hdev = vport->back;
4867 struct hclge_rss_input_tuple_cmd *req;
4868 struct hclge_desc desc;
4871 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4872 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4875 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4876 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4878 ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
4880 dev_err(&hdev->pdev->dev,
4881 "failed to init rss tuple cmd, ret = %d\n", ret);
4885 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4887 dev_err(&hdev->pdev->dev,
4888 "Set rss tuple fail, status = %d\n", ret);
4892 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4893 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4894 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4895 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4896 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4897 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4898 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4899 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4900 hclge_get_rss_type(vport);
4904 static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
4907 switch (flow_type) {
4909 *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4912 *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4915 *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4918 *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4921 *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4924 *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4928 *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4937 static u64 hclge_convert_rss_tuple(u8 tuple_sets)
4941 if (tuple_sets & HCLGE_D_PORT_BIT)
4942 tuple_data |= RXH_L4_B_2_3;
4943 if (tuple_sets & HCLGE_S_PORT_BIT)
4944 tuple_data |= RXH_L4_B_0_1;
4945 if (tuple_sets & HCLGE_D_IP_BIT)
4946 tuple_data |= RXH_IP_DST;
4947 if (tuple_sets & HCLGE_S_IP_BIT)
4948 tuple_data |= RXH_IP_SRC;
4953 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4954 struct ethtool_rxnfc *nfc)
4956 struct hclge_vport *vport = hclge_get_vport(handle);
4962 ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
4963 if (ret || !tuple_sets)
4966 nfc->data = hclge_convert_rss_tuple(tuple_sets);
4971 static int hclge_get_tc_size(struct hnae3_handle *handle)
4973 struct hclge_vport *vport = hclge_get_vport(handle);
4974 struct hclge_dev *hdev = vport->back;
4976 return hdev->pf_rss_size_max;
4979 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4981 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4982 struct hclge_vport *vport = hdev->vport;
4983 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4984 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4985 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4986 struct hnae3_tc_info *tc_info;
4991 tc_info = &vport->nic.kinfo.tc_info;
4992 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4993 rss_size = tc_info->tqp_count[i];
4996 if (!(hdev->hw_tc_map & BIT(i)))
4999 /* tc_size set to hardware is the log2 of roundup power of two
5000 * of rss_size, the actual queue size is limited by the indirection table.
5003 if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
5005 dev_err(&hdev->pdev->dev,
5006 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
5011 roundup_size = roundup_pow_of_two(rss_size);
5012 roundup_size = ilog2(roundup_size);
5015 tc_size[i] = roundup_size;
5016 tc_offset[i] = tc_info->tqp_offset[i];
5019 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
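/* Restore the RSS configuration kept in vport 0 to the hardware:
 * indirection table, hash algorithm and key, input tuples and per-TC mode.
 */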
5022 int hclge_rss_init_hw(struct hclge_dev *hdev)
5024 struct hclge_vport *vport = hdev->vport;
5025 u16 *rss_indir = vport[0].rss_indirection_tbl;
5026 u8 *key = vport[0].rss_hash_key;
5027 u8 hfunc = vport[0].rss_algo;
5030 ret = hclge_set_rss_indir_table(hdev, rss_indir);
5034 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
5038 ret = hclge_set_rss_input_tuple(hdev);
5042 return hclge_init_rss_tc_mode(hdev);
5045 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
5047 struct hclge_vport *vport = &hdev->vport[0];
5050 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
5051 vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
5054 static int hclge_rss_init_cfg(struct hclge_dev *hdev)
5056 u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
5057 int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
5058 struct hclge_vport *vport = &hdev->vport[0];
5061 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
5062 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
5064 vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5065 vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5066 vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
5067 vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5068 vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5069 vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5070 vport->rss_tuple_sets.ipv6_sctp_en =
5071 hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
5072 HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
5073 HCLGE_RSS_INPUT_TUPLE_SCTP;
5074 vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
5076 vport->rss_algo = rss_algo;
5078 rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
5079 sizeof(*rss_ind_tbl), GFP_KERNEL);
5083 vport->rss_indirection_tbl = rss_ind_tbl;
5084 memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
5086 hclge_rss_indir_init_cfg(hdev);
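/* Map (en == true) or unmap a chain of TX/RX rings to an interrupt vector.
 * Ring entries are packed into the command descriptor in batches of
 * HCLGE_VECTOR_ELEMENTS_PER_CMD, sending one command per full batch and a
 * final one for the remainder.
 */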
5091 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
5092 int vector_id, bool en,
5093 struct hnae3_ring_chain_node *ring_chain)
5095 struct hclge_dev *hdev = vport->back;
5096 struct hnae3_ring_chain_node *node;
5097 struct hclge_desc desc;
5098 struct hclge_ctrl_vector_chain_cmd *req =
5099 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
5100 enum hclge_cmd_status status;
5101 enum hclge_opcode_type op;
5102 u16 tqp_type_and_id;
5105 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
5106 hclge_cmd_setup_basic_desc(&desc, op, false);
5107 req->int_vector_id_l = hnae3_get_field(vector_id,
5108 HCLGE_VECTOR_ID_L_M,
5109 HCLGE_VECTOR_ID_L_S);
5110 req->int_vector_id_h = hnae3_get_field(vector_id,
5111 HCLGE_VECTOR_ID_H_M,
5112 HCLGE_VECTOR_ID_H_S);
5115 for (node = ring_chain; node; node = node->next) {
5116 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
5117 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
5119 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
5120 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
5121 HCLGE_TQP_ID_S, node->tqp_index);
5122 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
5124 hnae3_get_field(node->int_gl_idx,
5125 HNAE3_RING_GL_IDX_M,
5126 HNAE3_RING_GL_IDX_S));
5127 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
5128 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
5129 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
5130 req->vfid = vport->vport_id;
5132 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5134 dev_err(&hdev->pdev->dev,
5135 "Map TQP fail, status is %d.\n",
5141 hclge_cmd_setup_basic_desc(&desc,
5144 req->int_vector_id_l =
5145 hnae3_get_field(vector_id,
5146 HCLGE_VECTOR_ID_L_M,
5147 HCLGE_VECTOR_ID_L_S);
5148 req->int_vector_id_h =
5149 hnae3_get_field(vector_id,
5150 HCLGE_VECTOR_ID_H_M,
5151 HCLGE_VECTOR_ID_H_S);
5156 req->int_cause_num = i;
5157 req->vfid = vport->vport_id;
5158 status = hclge_cmd_send(&hdev->hw, &desc, 1);
5160 dev_err(&hdev->pdev->dev,
5161 "Map TQP fail, status is %d.\n", status);
5169 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
5170 struct hnae3_ring_chain_node *ring_chain)
5172 struct hclge_vport *vport = hclge_get_vport(handle);
5173 struct hclge_dev *hdev = vport->back;
5176 vector_id = hclge_get_vector_index(hdev, vector);
5177 if (vector_id < 0) {
5178 dev_err(&hdev->pdev->dev,
5179 "failed to get vector index. vector=%d\n", vector);
5183 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
5186 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
5187 struct hnae3_ring_chain_node *ring_chain)
5189 struct hclge_vport *vport = hclge_get_vport(handle);
5190 struct hclge_dev *hdev = vport->back;
5193 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
5196 vector_id = hclge_get_vector_index(hdev, vector);
5197 if (vector_id < 0) {
5198 dev_err(&handle->pdev->dev,
5199 "Get vector index fail. ret =%d\n", vector_id);
5203 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
5205 dev_err(&handle->pdev->dev,
5206 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5212 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5213 bool en_uc, bool en_mc, bool en_bc)
5215 struct hclge_vport *vport = &hdev->vport[vf_id];
5216 struct hnae3_handle *handle = &vport->nic;
5217 struct hclge_promisc_cfg_cmd *req;
5218 struct hclge_desc desc;
5219 bool uc_tx_en = en_uc;
5223 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5225 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5228 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5231 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5232 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5233 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5234 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5235 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5236 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5237 req->extend_promisc = promisc_cfg;
5239 /* to be compatible with DEVICE_VERSION_V1/2 */
5241 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5242 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5243 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5244 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5245 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5246 req->promisc = promisc_cfg;
5248 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5250 dev_err(&hdev->pdev->dev,
5251 "failed to set vport %u promisc mode, ret = %d.\n",
5257 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5258 bool en_mc_pmc, bool en_bc_pmc)
5260 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5261 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5264 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5267 struct hclge_vport *vport = hclge_get_vport(handle);
5268 struct hclge_dev *hdev = vport->back;
5269 bool en_bc_pmc = true;
5271 /* For devices whose version is below V2, if broadcast promisc is enabled,
5272 * the vlan filter is always bypassed. So broadcast promisc should be
5273 * disabled until the user enables promisc mode.
5275 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5276 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5278 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5282 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5284 struct hclge_vport *vport = hclge_get_vport(handle);
5286 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5289 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5291 if (hlist_empty(&hdev->fd_rule_list))
5292 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5295 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5297 if (!test_bit(location, hdev->fd_bmap)) {
5298 set_bit(location, hdev->fd_bmap);
5299 hdev->hclge_fd_rule_num++;
5303 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5305 if (test_bit(location, hdev->fd_bmap)) {
5306 clear_bit(location, hdev->fd_bmap);
5307 hdev->hclge_fd_rule_num--;
5311 static void hclge_fd_free_node(struct hclge_dev *hdev,
5312 struct hclge_fd_rule *rule)
5314 hlist_del(&rule->rule_node);
5316 hclge_sync_fd_state(hdev);
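/* Merge a new flow director rule into the rule list according to the
 * requested state: TO_ADD/ACTIVE replace the old rule in place, DELETED
 * frees it, and TO_DEL either frees a not-yet-programmed rule or marks it
 * for deletion by the periodic task.
 */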
5319 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5320 struct hclge_fd_rule *old_rule,
5321 struct hclge_fd_rule *new_rule,
5322 enum HCLGE_FD_NODE_STATE state)
5325 case HCLGE_FD_TO_ADD:
5326 case HCLGE_FD_ACTIVE:
5327 /* 1) if the new state is TO_ADD, just replace the old rule
5328 * with the same location, no matter its state, because the
5329 * new rule will be configured to the hardware.
5330 * 2) if the new state is ACTIVE, it means the new rule
5331 * has been configured to the hardware, so just replace
5332 * the old rule node with the same location.
5333 * 3) neither case adds a new node to the list, so it's
5334 * unnecessary to update the rule number and fd_bmap.
5336 new_rule->rule_node.next = old_rule->rule_node.next;
5337 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5338 memcpy(old_rule, new_rule, sizeof(*old_rule));
5341 case HCLGE_FD_DELETED:
5342 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5343 hclge_fd_free_node(hdev, old_rule);
5345 case HCLGE_FD_TO_DEL:
5346 /* if the new request is TO_DEL and an old rule exists:
5347 * 1) the state of old rule is TO_DEL, we need do nothing,
5348 * because we delete the rule by location and the other rule content is not needed.
5350 * 2) the state of old rule is ACTIVE, we need to change its
5351 * state to TO_DEL, so the rule will be deleted when periodic
5352 * task is scheduled.
5353 * 3) the state of old rule is TO_ADD, it means the rule hasn't
5354 * been added to hardware, so we just delete the rule node from
5355 * fd_rule_list directly.
5357 if (old_rule->state == HCLGE_FD_TO_ADD) {
5358 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5359 hclge_fd_free_node(hdev, old_rule);
5362 old_rule->state = HCLGE_FD_TO_DEL;
5367 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5369 struct hclge_fd_rule **parent)
5371 struct hclge_fd_rule *rule;
5372 struct hlist_node *node;
5374 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5375 if (rule->location == location)
5377 else if (rule->location > location)
5379 /* record the parent node, used to keep the nodes in fd_rule_list in ascending order */
5388 /* insert the fd rule node in ascending order according to rule->location */
5389 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5390 struct hclge_fd_rule *rule,
5391 struct hclge_fd_rule *parent)
5393 INIT_HLIST_NODE(&rule->rule_node);
5396 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5398 hlist_add_head(&rule->rule_node, hlist);
5401 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5402 struct hclge_fd_user_def_cfg *cfg)
5404 struct hclge_fd_user_def_cfg_cmd *req;
5405 struct hclge_desc desc;
5409 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5411 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5413 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5414 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5415 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5416 req->ol2_cfg = cpu_to_le16(data);
5419 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5420 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5421 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5422 req->ol3_cfg = cpu_to_le16(data);
5425 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5426 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5427 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5428 req->ol4_cfg = cpu_to_le16(data);
5430 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5432 dev_err(&hdev->pdev->dev,
5433 "failed to set fd user def data, ret= %d\n", ret);
5437 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5441 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5445 spin_lock_bh(&hdev->fd_rule_lock);
5447 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5449 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5452 spin_unlock_bh(&hdev->fd_rule_lock);
5455 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5456 struct hclge_fd_rule *rule)
5458 struct hlist_head *hlist = &hdev->fd_rule_list;
5459 struct hclge_fd_rule *fd_rule, *parent = NULL;
5460 struct hclge_fd_user_def_info *info, *old_info;
5461 struct hclge_fd_user_def_cfg *cfg;
5463 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5464 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5467 /* valid layers start from 1, so subtract 1 to get the cfg index */
5468 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5469 info = &rule->ep.user_def;
5471 if (!cfg->ref_cnt || cfg->offset == info->offset)
5474 if (cfg->ref_cnt > 1)
5477 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5479 old_info = &fd_rule->ep.user_def;
5480 if (info->layer == old_info->layer)
5485 dev_err(&hdev->pdev->dev,
5486 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5491 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5492 struct hclge_fd_rule *rule)
5494 struct hclge_fd_user_def_cfg *cfg;
5496 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5497 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5500 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5501 if (!cfg->ref_cnt) {
5502 cfg->offset = rule->ep.user_def.offset;
5503 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5508 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5509 struct hclge_fd_rule *rule)
5511 struct hclge_fd_user_def_cfg *cfg;
5513 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5514 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5517 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5522 if (!cfg->ref_cnt) {
5524 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5528 static void hclge_update_fd_list(struct hclge_dev *hdev,
5529 enum HCLGE_FD_NODE_STATE state, u16 location,
5530 struct hclge_fd_rule *new_rule)
5532 struct hlist_head *hlist = &hdev->fd_rule_list;
5533 struct hclge_fd_rule *fd_rule, *parent = NULL;
5535 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5537 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5538 if (state == HCLGE_FD_ACTIVE)
5539 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5540 hclge_sync_fd_user_def_cfg(hdev, true);
5542 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5546 /* it's unlikely to fail here, because we have already checked that the rule exists */
5549 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5550 dev_warn(&hdev->pdev->dev,
5551 "failed to delete fd rule %u, it's inexistent\n",
5556 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5557 hclge_sync_fd_user_def_cfg(hdev, true);
5559 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5560 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5562 if (state == HCLGE_FD_TO_ADD) {
5563 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5564 hclge_task_schedule(hdev, 0);
5568 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5570 struct hclge_get_fd_mode_cmd *req;
5571 struct hclge_desc desc;
5574 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5576 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5578 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5580 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5584 *fd_mode = req->mode;
5589 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5590 u32 *stage1_entry_num,
5591 u32 *stage2_entry_num,
5592 u16 *stage1_counter_num,
5593 u16 *stage2_counter_num)
5595 struct hclge_get_fd_allocation_cmd *req;
5596 struct hclge_desc desc;
5599 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5601 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5603 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5605 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5610 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5611 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5612 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5613 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5618 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5619 enum HCLGE_FD_STAGE stage_num)
5621 struct hclge_set_fd_key_config_cmd *req;
5622 struct hclge_fd_key_cfg *stage;
5623 struct hclge_desc desc;
5626 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5628 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5629 stage = &hdev->fd_cfg.key_cfg[stage_num];
5630 req->stage = stage_num;
5631 req->key_select = stage->key_sel;
5632 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5633 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5634 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5635 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5636 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5637 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5641 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5646 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5648 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5650 spin_lock_bh(&hdev->fd_rule_lock);
5651 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5652 spin_unlock_bh(&hdev->fd_rule_lock);
5654 hclge_fd_set_user_def_cmd(hdev, cfg);
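/* Initialize the flow director: query the fd mode from firmware, derive the
 * max key length, select the stage 1 key tuples and meta data fields, query
 * the rule/counter allocation and push the key config to hardware.
 */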
5657 static int hclge_init_fd_config(struct hclge_dev *hdev)
5659 #define LOW_2_WORDS 0x03
5660 struct hclge_fd_key_cfg *key_cfg;
5663 if (!hnae3_dev_fd_supported(hdev))
5666 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5670 switch (hdev->fd_cfg.fd_mode) {
5671 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5672 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5674 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5675 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5678 dev_err(&hdev->pdev->dev,
5679 "Unsupported flow director mode %u\n",
5680 hdev->fd_cfg.fd_mode);
5684 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5685 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5686 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5687 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5688 key_cfg->outer_sipv6_word_en = 0;
5689 key_cfg->outer_dipv6_word_en = 0;
5691 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5692 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5693 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5694 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5696 /* If the max 400-bit key is used, we can support tuples for ether type */
5697 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5698 key_cfg->tuple_active |=
5699 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5700 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5701 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5704 /* roce_type is used to filter roce frames
5705 * dst_vport is used to specify the rule
5707 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5709 ret = hclge_get_fd_allocation(hdev,
5710 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5711 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5712 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5713 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5717 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
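/* Write one half (x or y, selected by sel_x) of a TCAM entry at @loc using
 * three chained descriptors; the entry valid bit is only set on the x write
 * when adding a rule.
 */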
5720 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5721 int loc, u8 *key, bool is_add)
5723 struct hclge_fd_tcam_config_1_cmd *req1;
5724 struct hclge_fd_tcam_config_2_cmd *req2;
5725 struct hclge_fd_tcam_config_3_cmd *req3;
5726 struct hclge_desc desc[3];
5729 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5730 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5731 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5732 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
5733 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5735 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5736 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5737 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5739 req1->stage = stage;
5740 req1->xy_sel = sel_x ? 1 : 0;
5741 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5742 req1->index = cpu_to_le32(loc);
5743 req1->entry_vld = sel_x ? is_add : 0;
5746 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5747 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5748 sizeof(req2->tcam_data));
5749 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5750 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5753 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5755 dev_err(&hdev->pdev->dev,
5756 "config tcam key fail, ret=%d\n",
5762 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5763 struct hclge_fd_ad_data *action)
5765 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5766 struct hclge_fd_ad_config_cmd *req;
5767 struct hclge_desc desc;
5771 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5773 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5774 req->index = cpu_to_le32(loc);
5777 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5778 action->write_rule_id_to_bd);
5779 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5781 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5782 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5783 action->override_tc);
5784 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5785 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5788 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5789 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5790 action->forward_to_direct_queue);
5791 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5793 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5794 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5795 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5796 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5797 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5798 action->counter_id);
5800 req->ad_data = cpu_to_le64(ad_data);
5801 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5803 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
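/* Convert one tuple of the rule (value and mask) into the TCAM x/y key
 * format. The key option of the tuple decides how many bytes are consumed
 * and how they are laid out (single byte, le16, le32, MAC address or IP
 * address).
 */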
5808 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5809 struct hclge_fd_rule *rule)
5811 int offset, moffset, ip_offset;
5812 enum HCLGE_FD_KEY_OPT key_opt;
5813 u16 tmp_x_s, tmp_y_s;
5814 u32 tmp_x_l, tmp_y_l;
5818 if (rule->unused_tuple & BIT(tuple_bit))
5821 key_opt = tuple_key_info[tuple_bit].key_opt;
5822 offset = tuple_key_info[tuple_bit].offset;
5823 moffset = tuple_key_info[tuple_bit].moffset;
5827 calc_x(*key_x, p[offset], p[moffset]);
5828 calc_y(*key_y, p[offset], p[moffset]);
5832 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5833 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5834 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5835 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5839 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5840 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5841 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5842 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5846 for (i = 0; i < ETH_ALEN; i++) {
5847 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5849 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5855 ip_offset = IPV4_INDEX * sizeof(u32);
5856 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5857 *(u32 *)(&p[moffset + ip_offset]));
5858 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5859 *(u32 *)(&p[moffset + ip_offset]));
5860 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5861 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5869 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5870 u8 vf_id, u8 network_port_id)
5872 u32 port_number = 0;
5874 if (port_type == HOST_PORT) {
5875 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5877 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5879 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5881 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5882 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5883 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5889 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5890 __le32 *key_x, __le32 *key_y,
5891 struct hclge_fd_rule *rule)
5893 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5894 u8 cur_pos = 0, tuple_size, shift_bits;
5897 for (i = 0; i < MAX_META_DATA; i++) {
5898 tuple_size = meta_data_key_info[i].key_length;
5899 tuple_bit = key_cfg->meta_data_active & BIT(i);
5901 switch (tuple_bit) {
5902 case BIT(ROCE_TYPE):
5903 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5904 cur_pos += tuple_size;
5906 case BIT(DST_VPORT):
5907 port_number = hclge_get_port_number(HOST_PORT, 0,
5909 hnae3_set_field(meta_data,
5910 GENMASK(cur_pos + tuple_size, cur_pos),
5911 cur_pos, port_number);
5912 cur_pos += tuple_size;
5919 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5920 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5921 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5923 *key_x = cpu_to_le32(tmp_x << shift_bits);
5924 *key_y = cpu_to_le32(tmp_y << shift_bits);
5927 /* A complete key is combined with meta data key and tuple key.
5928 * Meta data key is stored at the MSB region, and tuple key is stored at
5929 * the LSB region, and unused bits will be filled with 0.
5931 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5932 struct hclge_fd_rule *rule)
5934 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5935 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5936 u8 *cur_key_x, *cur_key_y;
5937 u8 meta_data_region;
5942 memset(key_x, 0, sizeof(key_x));
5943 memset(key_y, 0, sizeof(key_y));
5947 for (i = 0; i < MAX_TUPLE; i++) {
5950 tuple_size = tuple_key_info[i].key_length / 8;
5951 if (!(key_cfg->tuple_active & BIT(i)))
5954 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5957 cur_key_x += tuple_size;
5958 cur_key_y += tuple_size;
5962 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5963 MAX_META_DATA_LENGTH / 8;
5965 hclge_fd_convert_meta_data(key_cfg,
5966 (__le32 *)(key_x + meta_data_region),
5967 (__le32 *)(key_y + meta_data_region),
5970 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5973 dev_err(&hdev->pdev->dev,
5974 "fd key_y config fail, loc=%u, ret=%d\n",
5975 rule->queue_id, ret);
5979 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5982 dev_err(&hdev->pdev->dev,
5983 "fd key_x config fail, loc=%u, ret=%d\n",
5984 rule->queue_id, ret);
5988 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5989 struct hclge_fd_rule *rule)
5991 struct hclge_vport *vport = hdev->vport;
5992 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5993 struct hclge_fd_ad_data ad_data;
5995 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5996 ad_data.ad_id = rule->location;
5998 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5999 ad_data.drop_packet = true;
6000 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
6001 ad_data.override_tc = true;
6003 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
6005 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
6007 ad_data.forward_to_direct_queue = true;
6008 ad_data.queue_id = rule->queue_id;
6011 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
6012 ad_data.use_counter = true;
6013 ad_data.counter_id = rule->vf_id %
6014 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
6016 ad_data.use_counter = false;
6017 ad_data.counter_id = 0;
6020 ad_data.use_next_stage = false;
6021 ad_data.next_input_key = 0;
6023 ad_data.write_rule_id_to_bd = true;
6024 ad_data.rule_id = rule->location;
6026 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
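/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow spec
 * and collect fields that were left unspecified (zero) into @unused_tuple,
 * so that only the fields the user actually set are matched.
 */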
6029 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
6032 if (!spec || !unused_tuple)
6035 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6038 *unused_tuple |= BIT(INNER_SRC_IP);
6041 *unused_tuple |= BIT(INNER_DST_IP);
6044 *unused_tuple |= BIT(INNER_SRC_PORT);
6047 *unused_tuple |= BIT(INNER_DST_PORT);
6050 *unused_tuple |= BIT(INNER_IP_TOS);
6055 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
6058 if (!spec || !unused_tuple)
6061 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6062 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6065 *unused_tuple |= BIT(INNER_SRC_IP);
6068 *unused_tuple |= BIT(INNER_DST_IP);
6071 *unused_tuple |= BIT(INNER_IP_TOS);
6074 *unused_tuple |= BIT(INNER_IP_PROTO);
6076 if (spec->l4_4_bytes)
6079 if (spec->ip_ver != ETH_RX_NFC_IP4)
6085 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
6088 if (!spec || !unused_tuple)
6091 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
6093 /* check whether the src/dst ip address is used */
6094 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6095 *unused_tuple |= BIT(INNER_SRC_IP);
6097 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6098 *unused_tuple |= BIT(INNER_DST_IP);
6101 *unused_tuple |= BIT(INNER_SRC_PORT);
6104 *unused_tuple |= BIT(INNER_DST_PORT);
6107 *unused_tuple |= BIT(INNER_IP_TOS);
6112 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
6115 if (!spec || !unused_tuple)
6118 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6119 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
6121 /* check whether the src/dst ip address is used */
6122 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
6123 *unused_tuple |= BIT(INNER_SRC_IP);
6125 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
6126 *unused_tuple |= BIT(INNER_DST_IP);
6128 if (!spec->l4_proto)
6129 *unused_tuple |= BIT(INNER_IP_PROTO);
6132 *unused_tuple |= BIT(INNER_IP_TOS);
6134 if (spec->l4_4_bytes)
6140 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
6142 if (!spec || !unused_tuple)
6145 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
6146 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
6147 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
6149 if (is_zero_ether_addr(spec->h_source))
6150 *unused_tuple |= BIT(INNER_SRC_MAC);
6152 if (is_zero_ether_addr(spec->h_dest))
6153 *unused_tuple |= BIT(INNER_DST_MAC);
6156 *unused_tuple |= BIT(INNER_ETH_TYPE);
6161 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
6162 struct ethtool_rx_flow_spec *fs,
6165 if (fs->flow_type & FLOW_EXT) {
6166 if (fs->h_ext.vlan_etype) {
6167 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
6171 if (!fs->h_ext.vlan_tci)
6172 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6174 if (fs->m_ext.vlan_tci &&
6175 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
6176 dev_err(&hdev->pdev->dev,
6177 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
6178 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
6182 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
6185 if (fs->flow_type & FLOW_MAC_EXT) {
6186 if (hdev->fd_cfg.fd_mode !=
6187 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6188 dev_err(&hdev->pdev->dev,
6189 "FLOW_MAC_EXT is not supported in current fd mode!\n");
6193 if (is_zero_ether_addr(fs->h_ext.h_dest))
6194 *unused_tuple |= BIT(INNER_DST_MAC);
6196 *unused_tuple &= ~BIT(INNER_DST_MAC);
6202 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
6203 struct hclge_fd_user_def_info *info)
6205 switch (flow_type) {
6207 info->layer = HCLGE_FD_USER_DEF_L2;
6208 *unused_tuple &= ~BIT(INNER_L2_RSV);
6211 case IPV6_USER_FLOW:
6212 info->layer = HCLGE_FD_USER_DEF_L3;
6213 *unused_tuple &= ~BIT(INNER_L3_RSV);
6219 info->layer = HCLGE_FD_USER_DEF_L4;
6220 *unused_tuple &= ~BIT(INNER_L4_RSV);
6229 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6231 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6234 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6235 struct ethtool_rx_flow_spec *fs,
6237 struct hclge_fd_user_def_info *info)
6239 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6240 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6241 u16 data, offset, data_mask, offset_mask;
6244 info->layer = HCLGE_FD_USER_DEF_NONE;
6245 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6247 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
6250 /* the user-def data from ethtool is a 64 bit value; bits 0~15 are used
6251 * for data, and bits 32~47 are used for the offset.
6253 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6254 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6255 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6256 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6258 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6259 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6263 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6264 dev_err(&hdev->pdev->dev,
6265 "user-def offset[%u] should be no more than %u\n",
6266 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6270 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6271 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6275 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6277 dev_err(&hdev->pdev->dev,
6278 "unsupported flow type for user-def bytes, ret = %d\n",
6284 info->data_mask = data_mask;
6285 info->offset = offset;
6290 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6291 struct ethtool_rx_flow_spec *fs,
6293 struct hclge_fd_user_def_info *info)
6298 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6299 dev_err(&hdev->pdev->dev,
6300 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6302 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6306 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6310 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6311 switch (flow_type) {
6315 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6319 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6325 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6328 case IPV6_USER_FLOW:
6329 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6333 if (hdev->fd_cfg.fd_mode !=
6334 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6335 dev_err(&hdev->pdev->dev,
6336 "ETHER_FLOW is not supported in current fd mode!\n");
6340 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6344 dev_err(&hdev->pdev->dev,
6345 "unsupported protocol type, protocol type = %#x\n",
6351 dev_err(&hdev->pdev->dev,
6352 "failed to check flow union tuple, ret = %d\n",
6357 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6360 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6361 struct ethtool_rx_flow_spec *fs,
6362 struct hclge_fd_rule *rule, u8 ip_proto)
6364 rule->tuples.src_ip[IPV4_INDEX] =
6365 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6366 rule->tuples_mask.src_ip[IPV4_INDEX] =
6367 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6369 rule->tuples.dst_ip[IPV4_INDEX] =
6370 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6371 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6372 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6374 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6375 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6377 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6378 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6380 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6381 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6383 rule->tuples.ether_proto = ETH_P_IP;
6384 rule->tuples_mask.ether_proto = 0xFFFF;
6386 rule->tuples.ip_proto = ip_proto;
6387 rule->tuples_mask.ip_proto = 0xFF;
6390 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6391 struct ethtool_rx_flow_spec *fs,
6392 struct hclge_fd_rule *rule)
6394 rule->tuples.src_ip[IPV4_INDEX] =
6395 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6396 rule->tuples_mask.src_ip[IPV4_INDEX] =
6397 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6399 rule->tuples.dst_ip[IPV4_INDEX] =
6400 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6401 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6402 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6404 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6405 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6407 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6408 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6410 rule->tuples.ether_proto = ETH_P_IP;
6411 rule->tuples_mask.ether_proto = 0xFFFF;
6414 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6415 struct ethtool_rx_flow_spec *fs,
6416 struct hclge_fd_rule *rule, u8 ip_proto)
6418 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6420 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6423 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6425 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6428 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6429 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6431 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6432 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6434 rule->tuples.ether_proto = ETH_P_IPV6;
6435 rule->tuples_mask.ether_proto = 0xFFFF;
6437 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6438 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6440 rule->tuples.ip_proto = ip_proto;
6441 rule->tuples_mask.ip_proto = 0xFF;
6444 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6445 struct ethtool_rx_flow_spec *fs,
6446 struct hclge_fd_rule *rule)
6448 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6450 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6453 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6455 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6458 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6459 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6461 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6462 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6464 rule->tuples.ether_proto = ETH_P_IPV6;
6465 rule->tuples_mask.ether_proto = 0xFFFF;
6468 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6469 struct ethtool_rx_flow_spec *fs,
6470 struct hclge_fd_rule *rule)
6472 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6473 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6475 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6476 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6478 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6479 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6482 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6483 struct hclge_fd_rule *rule)
6485 switch (info->layer) {
6486 case HCLGE_FD_USER_DEF_L2:
6487 rule->tuples.l2_user_def = info->data;
6488 rule->tuples_mask.l2_user_def = info->data_mask;
6490 case HCLGE_FD_USER_DEF_L3:
6491 rule->tuples.l3_user_def = info->data;
6492 rule->tuples_mask.l3_user_def = info->data_mask;
6494 case HCLGE_FD_USER_DEF_L4:
6495 rule->tuples.l4_user_def = (u32)info->data << 16;
6496 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6502 rule->ep.user_def = *info;
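/* Convert an ethtool flow spec into the driver's internal rule tuples.
 * The flow-type specific helpers above fill in the L2/L3/L4 fields,
 * while the FLOW_EXT (vlan tag, user-defined data) and FLOW_MAC_EXT
 * (destination MAC) extensions are handled for all flow types below.
 */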
6505 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6506 struct ethtool_rx_flow_spec *fs,
6507 struct hclge_fd_rule *rule,
6508 struct hclge_fd_user_def_info *info)
6510 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6512 switch (flow_type) {
6514 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6517 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6520 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6523 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6526 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6529 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6532 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6534 case IPV6_USER_FLOW:
6535 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6538 hclge_fd_get_ether_tuple(hdev, fs, rule);
6544 if (fs->flow_type & FLOW_EXT) {
6545 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6546 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6547 hclge_fd_get_user_def_tuple(info, rule);
6550 if (fs->flow_type & FLOW_MAC_EXT) {
6551 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6552 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6558 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6559 struct hclge_fd_rule *rule)
6563 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6567 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
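/* Common add path shared by ethtool and tc flower rules. The two rule
 * types cannot be active at the same time, so a type conflict is
 * rejected first; any aRFS rules are then cleared before the new rule
 * is written to the key/action tables and added to the software list.
 */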
6570 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6571 struct hclge_fd_rule *rule)
6575 spin_lock_bh(&hdev->fd_rule_lock);
6577 if (hdev->fd_active_type != rule->rule_type &&
6578 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6579 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6580 dev_err(&hdev->pdev->dev,
6581 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6582 rule->rule_type, hdev->fd_active_type);
6583 spin_unlock_bh(&hdev->fd_rule_lock);
6587 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6591 ret = hclge_clear_arfs_rules(hdev);
6595 ret = hclge_fd_config_rule(hdev, rule);
6599 rule->state = HCLGE_FD_ACTIVE;
6600 hdev->fd_active_type = rule->rule_type;
6601 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6604 spin_unlock_bh(&hdev->fd_rule_lock);
6608 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6610 struct hclge_vport *vport = hclge_get_vport(handle);
6611 struct hclge_dev *hdev = vport->back;
6613 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
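/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC selects the drop
 * action, otherwise the cookie carries the target VF id and queue id,
 * which are validated against the number of VFs and TQPs.
 */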
6616 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6617 u16 *vport_id, u8 *action, u16 *queue_id)
6619 struct hclge_vport *vport = hdev->vport;
6621 if (ring_cookie == RX_CLS_FLOW_DISC) {
6622 *action = HCLGE_FD_ACTION_DROP_PACKET;
6624 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6625 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6628 if (vf > hdev->num_req_vfs) {
6629 dev_err(&hdev->pdev->dev,
6630 "Error: vf id (%u) > max vf num (%u)\n",
6631 vf, hdev->num_req_vfs);
6635 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6636 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6639 dev_err(&hdev->pdev->dev,
6640 "Error: queue id (%u) > max tqp num (%u)\n",
6645 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
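/* Entry point for adding an ethtool ntuple rule: check the flow spec,
 * decode the ring cookie, build an hclge_fd_rule and hand it to the
 * common add path.
 */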
6652 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6653 struct ethtool_rxnfc *cmd)
6655 struct hclge_vport *vport = hclge_get_vport(handle);
6656 struct hclge_dev *hdev = vport->back;
6657 struct hclge_fd_user_def_info info;
6658 u16 dst_vport_id = 0, q_index = 0;
6659 struct ethtool_rx_flow_spec *fs;
6660 struct hclge_fd_rule *rule;
6665 if (!hnae3_dev_fd_supported(hdev)) {
6666 dev_err(&hdev->pdev->dev,
6667 "flow table director is not supported\n");
6672 dev_err(&hdev->pdev->dev,
6673 "please enable flow director first\n");
6677 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6679 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6683 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6688 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6692 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6698 rule->flow_type = fs->flow_type;
6699 rule->location = fs->location;
6700 rule->unused_tuple = unused;
6701 rule->vf_id = dst_vport_id;
6702 rule->queue_id = q_index;
6703 rule->action = action;
6704 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6706 ret = hclge_add_fd_entry_common(hdev, rule);
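/* Entry point for deleting an ethtool ntuple rule: under fd_rule_lock,
 * the entry is removed from the TCAM and then dropped from the
 * software list.
 */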
6713 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6714 struct ethtool_rxnfc *cmd)
6716 struct hclge_vport *vport = hclge_get_vport(handle);
6717 struct hclge_dev *hdev = vport->back;
6718 struct ethtool_rx_flow_spec *fs;
6721 if (!hnae3_dev_fd_supported(hdev))
6724 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6726 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6729 spin_lock_bh(&hdev->fd_rule_lock);
6730 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6731 !test_bit(fs->location, hdev->fd_bmap)) {
6732 dev_err(&hdev->pdev->dev,
6733 "Delete fail, rule %u is inexistent\n", fs->location);
6734 spin_unlock_bh(&hdev->fd_rule_lock);
6738 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6743 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6746 spin_unlock_bh(&hdev->fd_rule_lock);
6750 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6753 struct hclge_fd_rule *rule;
6754 struct hlist_node *node;
6757 if (!hnae3_dev_fd_supported(hdev))
6760 spin_lock_bh(&hdev->fd_rule_lock);
6762 for_each_set_bit(location, hdev->fd_bmap,
6763 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6764 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6768 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6770 hlist_del(&rule->rule_node);
6773 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6774 hdev->hclge_fd_rule_num = 0;
6775 bitmap_zero(hdev->fd_bmap,
6776 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6779 spin_unlock_bh(&hdev->fd_rule_lock);
6782 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6784 hclge_clear_fd_rules_in_list(hdev, true);
6785 hclge_fd_disable_user_def(hdev);
6788 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6790 struct hclge_vport *vport = hclge_get_vport(handle);
6791 struct hclge_dev *hdev = vport->back;
6792 struct hclge_fd_rule *rule;
6793 struct hlist_node *node;
6795 /* Return ok here, because reset error handling will check this
6796 * return value. If error is returned here, the reset process will fail.
6799 if (!hnae3_dev_fd_supported(hdev))
6802 /* if fd is disabled, it should not be restored during reset */
6806 spin_lock_bh(&hdev->fd_rule_lock);
6807 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6808 if (rule->state == HCLGE_FD_ACTIVE)
6809 rule->state = HCLGE_FD_TO_ADD;
6811 spin_unlock_bh(&hdev->fd_rule_lock);
6812 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6817 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6818 struct ethtool_rxnfc *cmd)
6820 struct hclge_vport *vport = hclge_get_vport(handle);
6821 struct hclge_dev *hdev = vport->back;
6823 if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6826 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6827 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
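/* The hclge_fd_get_*_info() helpers below are the reverse of the
 * hclge_fd_get_*_tuple() helpers: they convert a stored rule back into
 * an ethtool flow spec. Any tuple marked as unused reports a zero mask.
 */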
6832 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6833 struct ethtool_tcpip4_spec *spec,
6834 struct ethtool_tcpip4_spec *spec_mask)
6836 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6837 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6838 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6840 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6841 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6842 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6844 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6845 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6846 0 : cpu_to_be16(rule->tuples_mask.src_port);
6848 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6849 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6850 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6852 spec->tos = rule->tuples.ip_tos;
6853 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6854 0 : rule->tuples_mask.ip_tos;
6857 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6858 struct ethtool_usrip4_spec *spec,
6859 struct ethtool_usrip4_spec *spec_mask)
6861 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6862 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6863 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6865 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6866 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6867 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6869 spec->tos = rule->tuples.ip_tos;
6870 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6871 0 : rule->tuples_mask.ip_tos;
6873 spec->proto = rule->tuples.ip_proto;
6874 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6875 0 : rule->tuples_mask.ip_proto;
6877 spec->ip_ver = ETH_RX_NFC_IP4;
6880 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6881 struct ethtool_tcpip6_spec *spec,
6882 struct ethtool_tcpip6_spec *spec_mask)
6884 cpu_to_be32_array(spec->ip6src,
6885 rule->tuples.src_ip, IPV6_SIZE);
6886 cpu_to_be32_array(spec->ip6dst,
6887 rule->tuples.dst_ip, IPV6_SIZE);
6888 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6889 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6891 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6894 if (rule->unused_tuple & BIT(INNER_DST_IP))
6895 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6897 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6900 spec->tclass = rule->tuples.ip_tos;
6901 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6902 0 : rule->tuples_mask.ip_tos;
6904 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6905 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6906 0 : cpu_to_be16(rule->tuples_mask.src_port);
6908 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6909 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6910 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6913 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6914 struct ethtool_usrip6_spec *spec,
6915 struct ethtool_usrip6_spec *spec_mask)
6917 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6918 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6919 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6920 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6922 cpu_to_be32_array(spec_mask->ip6src,
6923 rule->tuples_mask.src_ip, IPV6_SIZE);
6925 if (rule->unused_tuple & BIT(INNER_DST_IP))
6926 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6928 cpu_to_be32_array(spec_mask->ip6dst,
6929 rule->tuples_mask.dst_ip, IPV6_SIZE);
6931 spec->tclass = rule->tuples.ip_tos;
6932 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6933 0 : rule->tuples_mask.ip_tos;
6935 spec->l4_proto = rule->tuples.ip_proto;
6936 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6937 0 : rule->tuples_mask.ip_proto;
6940 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6941 struct ethhdr *spec,
6942 struct ethhdr *spec_mask)
6944 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6945 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6947 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6948 eth_zero_addr(spec_mask->h_source);
6950 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6952 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6953 eth_zero_addr(spec_mask->h_dest);
6955 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6957 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6958 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6959 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6962 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6963 struct hclge_fd_rule *rule)
6965 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6966 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6967 fs->h_ext.data[0] = 0;
6968 fs->h_ext.data[1] = 0;
6969 fs->m_ext.data[0] = 0;
6970 fs->m_ext.data[1] = 0;
6972 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6973 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6975 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6976 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6980 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6981 struct hclge_fd_rule *rule)
6983 if (fs->flow_type & FLOW_EXT) {
6984 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6985 fs->m_ext.vlan_tci =
6986 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6987 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6989 hclge_fd_get_user_def_info(fs, rule);
6992 if (fs->flow_type & FLOW_MAC_EXT) {
6993 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6994 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6995 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6997 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6998 rule->tuples_mask.dst_mac);
7002 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
7003 struct ethtool_rxnfc *cmd)
7005 struct hclge_vport *vport = hclge_get_vport(handle);
7006 struct hclge_fd_rule *rule = NULL;
7007 struct hclge_dev *hdev = vport->back;
7008 struct ethtool_rx_flow_spec *fs;
7009 struct hlist_node *node2;
7011 if (!hnae3_dev_fd_supported(hdev))
7014 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
7016 spin_lock_bh(&hdev->fd_rule_lock);
7018 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
7019 if (rule->location >= fs->location)
7023 if (!rule || fs->location != rule->location) {
7024 spin_unlock_bh(&hdev->fd_rule_lock);
7029 fs->flow_type = rule->flow_type;
7030 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
7034 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
7035 &fs->m_u.tcp_ip4_spec);
7038 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
7039 &fs->m_u.usr_ip4_spec);
7044 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
7045 &fs->m_u.tcp_ip6_spec);
7047 case IPV6_USER_FLOW:
7048 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
7049 &fs->m_u.usr_ip6_spec);
7051 /* The flow type of the fd rule has been checked before it was added to
7052 * the rule list. As all other flow types have been handled above, the
7053 * default case must be ETHER_FLOW.
7056 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
7057 &fs->m_u.ether_spec);
7061 hclge_fd_get_ext_info(fs, rule);
7063 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
7064 fs->ring_cookie = RX_CLS_FLOW_DISC;
7068 fs->ring_cookie = rule->queue_id;
7069 vf_id = rule->vf_id;
7070 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
7071 fs->ring_cookie |= vf_id;
7074 spin_unlock_bh(&hdev->fd_rule_lock);
7079 static int hclge_get_all_rules(struct hnae3_handle *handle,
7080 struct ethtool_rxnfc *cmd, u32 *rule_locs)
7082 struct hclge_vport *vport = hclge_get_vport(handle);
7083 struct hclge_dev *hdev = vport->back;
7084 struct hclge_fd_rule *rule;
7085 struct hlist_node *node2;
7088 if (!hnae3_dev_fd_supported(hdev))
7091 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
7093 spin_lock_bh(&hdev->fd_rule_lock);
7094 hlist_for_each_entry_safe(rule, node2,
7095 &hdev->fd_rule_list, rule_node) {
7096 if (cnt == cmd->rule_cnt) {
7097 spin_unlock_bh(&hdev->fd_rule_lock);
7101 if (rule->state == HCLGE_FD_TO_DEL)
7104 rule_locs[cnt] = rule->location;
7108 spin_unlock_bh(&hdev->fd_rule_lock);
7110 cmd->rule_cnt = cnt;
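/* aRFS support: extract the tuple fields used for rule matching from
 * the flow keys dissected by the network stack.
 */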
7115 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7116 struct hclge_fd_rule_tuples *tuples)
7118 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7119 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7121 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7122 tuples->ip_proto = fkeys->basic.ip_proto;
7123 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7125 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7126 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7127 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7131 for (i = 0; i < IPV6_SIZE; i++) {
7132 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7133 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7138 /* traverse all rules, check whether an existing rule has the same tuples */
7139 static struct hclge_fd_rule *
7140 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7141 const struct hclge_fd_rule_tuples *tuples)
7143 struct hclge_fd_rule *rule = NULL;
7144 struct hlist_node *node;
7146 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7147 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7154 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7155 struct hclge_fd_rule *rule)
7157 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7158 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7159 BIT(INNER_SRC_PORT);
7162 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7163 rule->state = HCLGE_FD_TO_ADD;
7164 if (tuples->ether_proto == ETH_P_IP) {
7165 if (tuples->ip_proto == IPPROTO_TCP)
7166 rule->flow_type = TCP_V4_FLOW;
7168 rule->flow_type = UDP_V4_FLOW;
7170 if (tuples->ip_proto == IPPROTO_TCP)
7171 rule->flow_type = TCP_V6_FLOW;
7173 rule->flow_type = UDP_V6_FLOW;
7175 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7176 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
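/* aRFS add callback. New rules are only inserted into the software
 * list here in HCLGE_FD_TO_ADD state; writing them to hardware is
 * deferred to hclge_sync_fd_table() via HCLGE_STATE_FD_TBL_CHANGED.
 */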
7179 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7180 u16 flow_id, struct flow_keys *fkeys)
7182 struct hclge_vport *vport = hclge_get_vport(handle);
7183 struct hclge_fd_rule_tuples new_tuples = {};
7184 struct hclge_dev *hdev = vport->back;
7185 struct hclge_fd_rule *rule;
7188 if (!hnae3_dev_fd_supported(hdev))
7191 /* when there is already an fd rule added by the user,
7192 * arfs should not work
7194 spin_lock_bh(&hdev->fd_rule_lock);
7195 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7196 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7197 spin_unlock_bh(&hdev->fd_rule_lock);
7201 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7203 /* check whether a flow director filter already exists for this flow:
7204 * if not, create a new filter for it;
7205 * if a filter exists with a different queue id, modify the filter;
7206 * if a filter exists with the same queue id, do nothing
7208 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7210 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7211 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7212 spin_unlock_bh(&hdev->fd_rule_lock);
7216 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7218 spin_unlock_bh(&hdev->fd_rule_lock);
7222 rule->location = bit_id;
7223 rule->arfs.flow_id = flow_id;
7224 rule->queue_id = queue_id;
7225 hclge_fd_build_arfs_rule(&new_tuples, rule);
7226 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7227 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7228 } else if (rule->queue_id != queue_id) {
7229 rule->queue_id = queue_id;
7230 rule->state = HCLGE_FD_TO_ADD;
7231 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7232 hclge_task_schedule(hdev, 0);
7234 spin_unlock_bh(&hdev->fd_rule_lock);
7235 return rule->location;
7238 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7240 #ifdef CONFIG_RFS_ACCEL
7241 struct hnae3_handle *handle = &hdev->vport[0].nic;
7242 struct hclge_fd_rule *rule;
7243 struct hlist_node *node;
7245 spin_lock_bh(&hdev->fd_rule_lock);
7246 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7247 spin_unlock_bh(&hdev->fd_rule_lock);
7250 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7251 if (rule->state != HCLGE_FD_ACTIVE)
7253 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7254 rule->arfs.flow_id, rule->location)) {
7255 rule->state = HCLGE_FD_TO_DEL;
7256 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7259 spin_unlock_bh(&hdev->fd_rule_lock);
7263 /* must be called with fd_rule_lock held */
7264 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7266 #ifdef CONFIG_RFS_ACCEL
7267 struct hclge_fd_rule *rule;
7268 struct hlist_node *node;
7271 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7274 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7275 switch (rule->state) {
7276 case HCLGE_FD_TO_DEL:
7277 case HCLGE_FD_ACTIVE:
7278 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7279 rule->location, NULL, false);
7283 case HCLGE_FD_TO_ADD:
7284 hclge_fd_dec_rule_cnt(hdev, rule->location);
7285 hlist_del(&rule->rule_node);
7292 hclge_sync_fd_state(hdev);
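/* tc flower offload: the hclge_get_cls_key_*() helpers below translate
 * flow_rule match keys into rule tuples; any key that is not matched
 * is recorded in unused_tuple.
 */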
7298 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7299 struct hclge_fd_rule *rule)
7301 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7302 struct flow_match_basic match;
7303 u16 ethtype_key, ethtype_mask;
7305 flow_rule_match_basic(flow, &match);
7306 ethtype_key = ntohs(match.key->n_proto);
7307 ethtype_mask = ntohs(match.mask->n_proto);
7309 if (ethtype_key == ETH_P_ALL) {
7313 rule->tuples.ether_proto = ethtype_key;
7314 rule->tuples_mask.ether_proto = ethtype_mask;
7315 rule->tuples.ip_proto = match.key->ip_proto;
7316 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7318 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7319 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7323 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7324 struct hclge_fd_rule *rule)
7326 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7327 struct flow_match_eth_addrs match;
7329 flow_rule_match_eth_addrs(flow, &match);
7330 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7331 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7332 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7333 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7335 rule->unused_tuple |= BIT(INNER_DST_MAC);
7336 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7340 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7341 struct hclge_fd_rule *rule)
7343 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7344 struct flow_match_vlan match;
7346 flow_rule_match_vlan(flow, &match);
7347 rule->tuples.vlan_tag1 = match.key->vlan_id |
7348 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7349 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7350 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7352 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7356 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7357 struct hclge_fd_rule *rule)
7361 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7362 struct flow_match_control match;
7364 flow_rule_match_control(flow, &match);
7365 addr_type = match.key->addr_type;
7368 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7369 struct flow_match_ipv4_addrs match;
7371 flow_rule_match_ipv4_addrs(flow, &match);
7372 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7373 rule->tuples_mask.src_ip[IPV4_INDEX] =
7374 be32_to_cpu(match.mask->src);
7375 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7376 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7377 be32_to_cpu(match.mask->dst);
7378 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7379 struct flow_match_ipv6_addrs match;
7381 flow_rule_match_ipv6_addrs(flow, &match);
7382 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7384 be32_to_cpu_array(rule->tuples_mask.src_ip,
7385 match.mask->src.s6_addr32, IPV6_SIZE);
7386 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7388 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7389 match.mask->dst.s6_addr32, IPV6_SIZE);
7391 rule->unused_tuple |= BIT(INNER_SRC_IP);
7392 rule->unused_tuple |= BIT(INNER_DST_IP);
7396 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7397 struct hclge_fd_rule *rule)
7399 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7400 struct flow_match_ports match;
7402 flow_rule_match_ports(flow, &match);
7404 rule->tuples.src_port = be16_to_cpu(match.key->src);
7405 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7406 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7407 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7409 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7410 rule->unused_tuple |= BIT(INNER_DST_PORT);
7414 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7415 struct flow_cls_offload *cls_flower,
7416 struct hclge_fd_rule *rule)
7418 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7419 struct flow_dissector *dissector = flow->match.dissector;
7421 if (dissector->used_keys &
7422 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7423 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7424 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7425 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7426 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7427 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7428 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7429 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7430 dissector->used_keys);
7434 hclge_get_cls_key_basic(flow, rule);
7435 hclge_get_cls_key_mac(flow, rule);
7436 hclge_get_cls_key_vlan(flow, rule);
7437 hclge_get_cls_key_ip(flow, rule);
7438 hclge_get_cls_key_port(flow, rule);
7443 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7444 struct flow_cls_offload *cls_flower, int tc)
7446 u32 prio = cls_flower->common.prio;
7448 if (tc < 0 || tc > hdev->tc_max) {
7449 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7454 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7455 dev_err(&hdev->pdev->dev,
7456 "prio %u should be in range[1, %u]\n",
7457 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7461 if (test_bit(prio - 1, hdev->fd_bmap)) {
7462 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7468 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7469 struct flow_cls_offload *cls_flower,
7472 struct hclge_vport *vport = hclge_get_vport(handle);
7473 struct hclge_dev *hdev = vport->back;
7474 struct hclge_fd_rule *rule;
7477 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7479 dev_err(&hdev->pdev->dev,
7480 "failed to check cls flower params, ret = %d\n", ret);
7484 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7488 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7494 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7495 rule->cls_flower.tc = tc;
7496 rule->location = cls_flower->common.prio - 1;
7498 rule->cls_flower.cookie = cls_flower->cookie;
7499 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7501 ret = hclge_add_fd_entry_common(hdev, rule);
7508 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7509 unsigned long cookie)
7511 struct hclge_fd_rule *rule;
7512 struct hlist_node *node;
7514 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7515 if (rule->cls_flower.cookie == cookie)
7522 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7523 struct flow_cls_offload *cls_flower)
7525 struct hclge_vport *vport = hclge_get_vport(handle);
7526 struct hclge_dev *hdev = vport->back;
7527 struct hclge_fd_rule *rule;
7530 spin_lock_bh(&hdev->fd_rule_lock);
7532 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7534 spin_unlock_bh(&hdev->fd_rule_lock);
7538 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7541 spin_unlock_bh(&hdev->fd_rule_lock);
7545 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7546 spin_unlock_bh(&hdev->fd_rule_lock);
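/* Apply pending rule states to hardware: TO_ADD rules are written to
 * the key/action tables, TO_DEL rules are removed and freed. On
 * failure HCLGE_STATE_FD_TBL_CHANGED is set again so the sync is
 * retried the next time hclge_sync_fd_table() runs.
 */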
7551 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7553 struct hclge_fd_rule *rule;
7554 struct hlist_node *node;
7557 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7560 spin_lock_bh(&hdev->fd_rule_lock);
7562 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7563 switch (rule->state) {
7564 case HCLGE_FD_TO_ADD:
7565 ret = hclge_fd_config_rule(hdev, rule);
7568 rule->state = HCLGE_FD_ACTIVE;
7570 case HCLGE_FD_TO_DEL:
7571 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7572 rule->location, NULL, false);
7575 hclge_fd_dec_rule_cnt(hdev, rule->location);
7576 hclge_fd_free_node(hdev, rule);
7585 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7587 spin_unlock_bh(&hdev->fd_rule_lock);
7590 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7592 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7593 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7595 hclge_clear_fd_rules_in_list(hdev, clear_list);
7598 hclge_sync_fd_user_def_cfg(hdev, false);
7600 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7603 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7605 struct hclge_vport *vport = hclge_get_vport(handle);
7606 struct hclge_dev *hdev = vport->back;
7608 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7609 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7612 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7614 struct hclge_vport *vport = hclge_get_vport(handle);
7615 struct hclge_dev *hdev = vport->back;
7617 return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
7620 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7622 struct hclge_vport *vport = hclge_get_vport(handle);
7623 struct hclge_dev *hdev = vport->back;
7625 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7628 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7630 struct hclge_vport *vport = hclge_get_vport(handle);
7631 struct hclge_dev *hdev = vport->back;
7633 return hdev->rst_stats.hw_reset_done_cnt;
7636 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7638 struct hclge_vport *vport = hclge_get_vport(handle);
7639 struct hclge_dev *hdev = vport->back;
7641 hdev->fd_en = enable;
7644 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7646 hclge_restore_fd_entries(handle);
7648 hclge_task_schedule(hdev, 0);
7651 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7653 struct hclge_desc desc;
7654 struct hclge_config_mac_mode_cmd *req =
7655 (struct hclge_config_mac_mode_cmd *)desc.data;
7659 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7662 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7663 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7664 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7665 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7666 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7667 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7668 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7669 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7670 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7671 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7674 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7676 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7678 dev_err(&hdev->pdev->dev,
7679 "mac enable fail, ret =%d.\n", ret);
7682 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7683 u8 switch_param, u8 param_mask)
7685 struct hclge_mac_vlan_switch_cmd *req;
7686 struct hclge_desc desc;
7690 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7691 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7693 /* read current config parameter */
7694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7696 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7697 req->func_id = cpu_to_le32(func_id);
7699 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7701 dev_err(&hdev->pdev->dev,
7702 "read mac vlan switch parameter fail, ret = %d\n", ret);
7706 /* modify and write new config parameter */
7707 hclge_cmd_reuse_desc(&desc, false);
7708 req->switch_param = (req->switch_param & param_mask) | switch_param;
7709 req->param_mask = param_mask;
7711 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7713 dev_err(&hdev->pdev->dev,
7714 "set mac vlan switch parameter fail, ret = %d\n", ret);
7718 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7721 #define HCLGE_PHY_LINK_STATUS_NUM 200
7723 struct phy_device *phydev = hdev->hw.mac.phydev;
7728 ret = phy_read_status(phydev);
7730 dev_err(&hdev->pdev->dev,
7731 "phy update link status fail, ret = %d\n", ret);
7735 if (phydev->link == link_ret)
7738 msleep(HCLGE_LINK_STATUS_MS);
7739 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7742 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7744 #define HCLGE_MAC_LINK_STATUS_NUM 100
7751 ret = hclge_get_mac_link_status(hdev, &link_status);
7754 if (link_status == link_ret)
7757 msleep(HCLGE_LINK_STATUS_MS);
7758 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7762 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7767 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7770 hclge_phy_link_status_wait(hdev, link_ret);
7772 return hclge_mac_link_status_wait(hdev, link_ret);
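/* App (MAC) loopback uses a read-modify-write of the MAC mode command,
 * so only the loopback bit changes and the other mode bits keep their
 * current values.
 */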
7775 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7777 struct hclge_config_mac_mode_cmd *req;
7778 struct hclge_desc desc;
7782 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7783 /* 1 Read out the MAC mode config at first */
7784 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7785 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7787 dev_err(&hdev->pdev->dev,
7788 "mac loopback get fail, ret =%d.\n", ret);
7792 /* 2 Then setup the loopback flag */
7793 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7794 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7796 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7798 /* 3 Config mac work mode with the loopback flag
7799 * and its original configuration parameters
7801 hclge_cmd_reuse_desc(&desc, false);
7802 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7804 dev_err(&hdev->pdev->dev,
7805 "mac loopback set fail, ret =%d.\n", ret);
7809 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7810 enum hnae3_loop loop_mode)
7812 #define HCLGE_COMMON_LB_RETRY_MS 10
7813 #define HCLGE_COMMON_LB_RETRY_NUM 100
7815 struct hclge_common_lb_cmd *req;
7816 struct hclge_desc desc;
7820 req = (struct hclge_common_lb_cmd *)desc.data;
7821 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7823 switch (loop_mode) {
7824 case HNAE3_LOOP_SERIAL_SERDES:
7825 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7827 case HNAE3_LOOP_PARALLEL_SERDES:
7828 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7830 case HNAE3_LOOP_PHY:
7831 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7834 dev_err(&hdev->pdev->dev,
7835 "unsupported common loopback mode %d\n", loop_mode);
7840 req->enable = loop_mode_b;
7841 req->mask = loop_mode_b;
7843 req->mask = loop_mode_b;
7846 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7848 dev_err(&hdev->pdev->dev,
7849 "common loopback set fail, ret = %d\n", ret);
7854 msleep(HCLGE_COMMON_LB_RETRY_MS);
7855 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7857 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7859 dev_err(&hdev->pdev->dev,
7860 "common loopback get, ret = %d\n", ret);
7863 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7864 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7866 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7867 dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
7869 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7870 dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
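/* Configure common (serdes or IMP-managed PHY) loopback in firmware,
 * then enable/disable the MAC accordingly and wait for the link state
 * to settle before returning.
 */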
7876 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7877 enum hnae3_loop loop_mode)
7881 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7885 hclge_cfg_mac_mode(hdev, en);
7887 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7889 dev_err(&hdev->pdev->dev,
7890 "serdes loopback config mac mode timeout\n");
7895 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7896 struct phy_device *phydev)
7900 if (!phydev->suspended) {
7901 ret = phy_suspend(phydev);
7906 ret = phy_resume(phydev);
7910 return phy_loopback(phydev, true);
7913 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7914 struct phy_device *phydev)
7918 ret = phy_loopback(phydev, false);
7922 return phy_suspend(phydev);
7925 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7927 struct phy_device *phydev = hdev->hw.mac.phydev;
7931 if (hnae3_dev_phy_imp_supported(hdev))
7932 return hclge_set_common_loopback(hdev, en,
7938 ret = hclge_enable_phy_loopback(hdev, phydev);
7940 ret = hclge_disable_phy_loopback(hdev, phydev);
7942 dev_err(&hdev->pdev->dev,
7943 "set phy loopback fail, ret = %d\n", ret);
7947 hclge_cfg_mac_mode(hdev, en);
7949 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7951 dev_err(&hdev->pdev->dev,
7952 "phy loopback config mac mode timeout\n");
7957 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7958 u16 stream_id, bool enable)
7960 struct hclge_desc desc;
7961 struct hclge_cfg_com_tqp_queue_cmd *req =
7962 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7964 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7965 req->tqp_id = cpu_to_le16(tqp_id);
7966 req->stream_id = cpu_to_le16(stream_id);
7968 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7970 return hclge_cmd_send(&hdev->hw, &desc, 1);
7973 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7975 struct hclge_vport *vport = hclge_get_vport(handle);
7976 struct hclge_dev *hdev = vport->back;
7980 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7981 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7988 static int hclge_set_loopback(struct hnae3_handle *handle,
7989 enum hnae3_loop loop_mode, bool en)
7991 struct hclge_vport *vport = hclge_get_vport(handle);
7992 struct hclge_dev *hdev = vport->back;
7995 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7996 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7997 * the same, the packets are looped back in the SSU. If SSU loopback
7998 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
8000 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
8001 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
8003 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
8004 HCLGE_SWITCH_ALW_LPBK_MASK);
8009 switch (loop_mode) {
8010 case HNAE3_LOOP_APP:
8011 ret = hclge_set_app_loopback(hdev, en);
8013 case HNAE3_LOOP_SERIAL_SERDES:
8014 case HNAE3_LOOP_PARALLEL_SERDES:
8015 ret = hclge_set_common_loopback(hdev, en, loop_mode);
8017 case HNAE3_LOOP_PHY:
8018 ret = hclge_set_phy_loopback(hdev, en);
8022 dev_err(&hdev->pdev->dev,
8023 "loop_mode %d is not supported\n", loop_mode);
8030 ret = hclge_tqp_enable(handle, en);
8032 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
8033 en ? "enable" : "disable", ret);
8038 static int hclge_set_default_loopback(struct hclge_dev *hdev)
8042 ret = hclge_set_app_loopback(hdev, false);
8046 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
8050 return hclge_cfg_common_loopback(hdev, false,
8051 HNAE3_LOOP_PARALLEL_SERDES);
8054 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
8056 struct hclge_vport *vport = hclge_get_vport(handle);
8057 struct hnae3_knic_private_info *kinfo;
8058 struct hnae3_queue *queue;
8059 struct hclge_tqp *tqp;
8062 kinfo = &vport->nic.kinfo;
8063 for (i = 0; i < kinfo->num_tqps; i++) {
8064 queue = handle->kinfo.tqp[i];
8065 tqp = container_of(queue, struct hclge_tqp, q);
8066 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
8070 static void hclge_flush_link_update(struct hclge_dev *hdev)
8072 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
8074 unsigned long last = hdev->serv_processed_cnt;
8077 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8078 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8079 last == hdev->serv_processed_cnt)
8083 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8085 struct hclge_vport *vport = hclge_get_vport(handle);
8086 struct hclge_dev *hdev = vport->back;
8089 hclge_task_schedule(hdev, 0);
8091 /* Set the DOWN flag here to disable link updating */
8092 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8094 /* flush memory to make sure DOWN is seen by service task */
8095 smp_mb__before_atomic();
8096 hclge_flush_link_update(hdev);
8100 static int hclge_ae_start(struct hnae3_handle *handle)
8102 struct hclge_vport *vport = hclge_get_vport(handle);
8103 struct hclge_dev *hdev = vport->back;
8106 hclge_cfg_mac_mode(hdev, true);
8107 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8108 hdev->hw.mac.link = 0;
8110 /* reset tqp stats */
8111 hclge_reset_tqp_stats(handle);
8113 hclge_mac_start_phy(hdev);
8118 static void hclge_ae_stop(struct hnae3_handle *handle)
8120 struct hclge_vport *vport = hclge_get_vport(handle);
8121 struct hclge_dev *hdev = vport->back;
8123 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8124 spin_lock_bh(&hdev->fd_rule_lock);
8125 hclge_clear_arfs_rules(hdev);
8126 spin_unlock_bh(&hdev->fd_rule_lock);
8128 /* If it is not PF reset, the firmware will disable the MAC,
8129 * so it only needs to stop the phy here.
8131 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
8132 hdev->reset_type != HNAE3_FUNC_RESET) {
8133 hclge_mac_stop_phy(hdev);
8134 hclge_update_link_status(hdev);
8138 hclge_reset_tqp(handle);
8140 hclge_config_mac_tnl_int(hdev, false);
8143 hclge_cfg_mac_mode(hdev, false);
8145 hclge_mac_stop_phy(hdev);
8147 /* reset tqp stats */
8148 hclge_reset_tqp_stats(handle);
8149 hclge_update_link_status(hdev);
8152 int hclge_vport_start(struct hclge_vport *vport)
8154 struct hclge_dev *hdev = vport->back;
8156 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8157 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8158 vport->last_active_jiffies = jiffies;
8160 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8161 if (vport->vport_id) {
8162 hclge_restore_mac_table_common(vport);
8163 hclge_restore_vport_vlan_table(vport);
8165 hclge_restore_hw_table(hdev);
8169 clear_bit(vport->vport_id, hdev->vport_config_block);
8174 void hclge_vport_stop(struct hclge_vport *vport)
8176 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8179 static int hclge_client_start(struct hnae3_handle *handle)
8181 struct hclge_vport *vport = hclge_get_vport(handle);
8183 return hclge_vport_start(vport);
8186 static void hclge_client_stop(struct hnae3_handle *handle)
8188 struct hclge_vport *vport = hclge_get_vport(handle);
8190 hclge_vport_stop(vport);
8193 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8194 u16 cmdq_resp, u8 resp_code,
8195 enum hclge_mac_vlan_tbl_opcode op)
8197 struct hclge_dev *hdev = vport->back;
8200 dev_err(&hdev->pdev->dev,
8201 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8206 if (op == HCLGE_MAC_VLAN_ADD) {
8207 if (!resp_code || resp_code == 1)
8209 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8210 resp_code == HCLGE_ADD_MC_OVERFLOW)
8213 dev_err(&hdev->pdev->dev,
8214 "add mac addr failed for undefined, code=%u.\n",
8217 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8220 } else if (resp_code == 1) {
8221 dev_dbg(&hdev->pdev->dev,
8222 "remove mac addr failed for miss.\n");
8226 dev_err(&hdev->pdev->dev,
8227 "remove mac addr failed for undefined, code=%u.\n",
8230 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8233 } else if (resp_code == 1) {
8234 dev_dbg(&hdev->pdev->dev,
8235 "lookup mac addr failed for miss.\n");
8239 dev_err(&hdev->pdev->dev,
8240 "lookup mac addr failed for undefined, code=%u.\n",
8245 dev_err(&hdev->pdev->dev,
8246 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8251 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8253 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8255 unsigned int word_num;
8256 unsigned int bit_num;
8258 if (vfid > 255 || vfid < 0)
8261 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8262 word_num = vfid / 32;
8263 bit_num = vfid % 32;
8265 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8267 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8269 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8270 bit_num = vfid % 32;
8272 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8274 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8280 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8282 #define HCLGE_DESC_NUMBER 3
8283 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8286 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8287 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8288 if (desc[i].data[j])
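/* Pack a MAC address into the MAC_VLAN table entry layout: bytes 0-3
 * form mac_addr_hi32 (byte 0 in the least significant bits) and bytes
 * 4-5 form mac_addr_lo16. Multicast entries additionally set the
 * entry_type and mc_mac_en bits.
 */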
8294 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8295 const u8 *addr, bool is_mc)
8297 const unsigned char *mac_addr = addr;
8298 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8299 (mac_addr[0]) | (mac_addr[1] << 8);
8300 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8302 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8304 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8305 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8308 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8309 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8312 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8313 struct hclge_mac_vlan_tbl_entry_cmd *req)
8315 struct hclge_dev *hdev = vport->back;
8316 struct hclge_desc desc;
8321 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8323 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8325 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8327 dev_err(&hdev->pdev->dev,
8328 "del mac addr failed for cmd_send, ret =%d.\n",
8332 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8333 retval = le16_to_cpu(desc.retval);
8335 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8336 HCLGE_MAC_VLAN_REMOVE);
8339 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8340 struct hclge_mac_vlan_tbl_entry_cmd *req,
8341 struct hclge_desc *desc,
8344 struct hclge_dev *hdev = vport->back;
8349 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8351 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8352 memcpy(desc[0].data,
8354 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8355 hclge_cmd_setup_basic_desc(&desc[1],
8356 HCLGE_OPC_MAC_VLAN_ADD,
8358 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8359 hclge_cmd_setup_basic_desc(&desc[2],
8360 HCLGE_OPC_MAC_VLAN_ADD,
8362 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8364 memcpy(desc[0].data,
8366 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8367 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8370 dev_err(&hdev->pdev->dev,
8371 "lookup mac addr failed for cmd_send, ret =%d.\n",
8375 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8376 retval = le16_to_cpu(desc[0].retval);
8378 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8379 HCLGE_MAC_VLAN_LKUP);
8382 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8383 struct hclge_mac_vlan_tbl_entry_cmd *req,
8384 struct hclge_desc *mc_desc)
8386 struct hclge_dev *hdev = vport->back;
8393 struct hclge_desc desc;
8395 hclge_cmd_setup_basic_desc(&desc,
8396 HCLGE_OPC_MAC_VLAN_ADD,
8398 memcpy(desc.data, req,
8399 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8400 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8401 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8402 retval = le16_to_cpu(desc.retval);
8404 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8406 HCLGE_MAC_VLAN_ADD);
8408 hclge_cmd_reuse_desc(&mc_desc[0], false);
8409 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8410 hclge_cmd_reuse_desc(&mc_desc[1], false);
8411 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
8412 hclge_cmd_reuse_desc(&mc_desc[2], false);
8413 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
8414 memcpy(mc_desc[0].data, req,
8415 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8416 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8417 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8418 retval = le16_to_cpu(mc_desc[0].retval);
8420 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8422 HCLGE_MAC_VLAN_ADD);
8426 dev_err(&hdev->pdev->dev,
8427 "add mac addr failed for cmd_send, ret =%d.\n",
8435 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8436 u16 *allocated_size)
8438 struct hclge_umv_spc_alc_cmd *req;
8439 struct hclge_desc desc;
8442 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8443 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8445 req->space_size = cpu_to_le32(space_size);
8447 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8449 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8454 *allocated_size = le32_to_cpu(desc.data[1]);
8459 static int hclge_init_umv_space(struct hclge_dev *hdev)
8461 u16 allocated_size = 0;
8464 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8468 if (allocated_size < hdev->wanted_umv_size)
8469 dev_warn(&hdev->pdev->dev,
8470 "failed to alloc umv space, want %u, get %u\n",
8471 hdev->wanted_umv_size, allocated_size);
8473 hdev->max_umv_size = allocated_size;
8474 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8475 hdev->share_umv_size = hdev->priv_umv_size +
8476 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8481 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8483 struct hclge_vport *vport;
8486 for (i = 0; i < hdev->num_alloc_vport; i++) {
8487 vport = &hdev->vport[i];
8488 vport->used_umv_num = 0;
8491 mutex_lock(&hdev->vport_lock);
8492 hdev->share_umv_size = hdev->priv_umv_size +
8493 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8494 mutex_unlock(&hdev->vport_lock);
8497 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8499 struct hclge_dev *hdev = vport->back;
8503 mutex_lock(&hdev->vport_lock);
8505 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8506 hdev->share_umv_size == 0);
8509 mutex_unlock(&hdev->vport_lock);
8514 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8516 struct hclge_dev *hdev = vport->back;
8519 if (vport->used_umv_num > hdev->priv_umv_size)
8520 hdev->share_umv_size++;
8522 if (vport->used_umv_num > 0)
8523 vport->used_umv_num--;
8525 if (vport->used_umv_num >= hdev->priv_umv_size &&
8526 hdev->share_umv_size > 0)
8527 hdev->share_umv_size--;
8528 vport->used_umv_num++;
8532 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8535 struct hclge_mac_node *mac_node, *tmp;
8537 list_for_each_entry_safe(mac_node, tmp, list, node)
8538 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
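/* Software MAC address bookkeeping: every configured address is kept
 * as a hclge_mac_node that moves between the TO_ADD, ACTIVE and TO_DEL
 * states; the mac list sync helpers later push the pending changes to
 * the hardware MAC table.
 */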
8544 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8545 enum HCLGE_MAC_NODE_STATE state)
8548 /* from set_rx_mode or tmp_add_list */
8549 case HCLGE_MAC_TO_ADD:
8550 if (mac_node->state == HCLGE_MAC_TO_DEL)
8551 mac_node->state = HCLGE_MAC_ACTIVE;
8553 /* only from set_rx_mode */
8554 case HCLGE_MAC_TO_DEL:
8555 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8556 list_del(&mac_node->node);
8559 mac_node->state = HCLGE_MAC_TO_DEL;
8562 /* only from tmp_add_list, the mac_node->state won't be HCLGE_MAC_ACTIVE.
8565 case HCLGE_MAC_ACTIVE:
8566 if (mac_node->state == HCLGE_MAC_TO_ADD)
8567 mac_node->state = HCLGE_MAC_ACTIVE;
8573 int hclge_update_mac_list(struct hclge_vport *vport,
8574 enum HCLGE_MAC_NODE_STATE state,
8575 enum HCLGE_MAC_ADDR_TYPE mac_type,
8576 const unsigned char *addr)
8578 struct hclge_dev *hdev = vport->back;
8579 struct hclge_mac_node *mac_node;
8580 struct list_head *list;
8582 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8583 &vport->uc_mac_list : &vport->mc_mac_list;
8585 spin_lock_bh(&vport->mac_list_lock);
8587 /* if the mac addr is already in the mac list, no need to add a new
8588 * one into it, just check the mac addr state, convert it to a new
8589 * state, or just remove it, or do nothing.
8591 mac_node = hclge_find_mac_node(list, addr);
8593 hclge_update_mac_node(mac_node, state);
8594 spin_unlock_bh(&vport->mac_list_lock);
8595 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8599 /* if this address was never added, there is nothing to delete */
8600 if (state == HCLGE_MAC_TO_DEL) {
8601 spin_unlock_bh(&vport->mac_list_lock);
8602 dev_err(&hdev->pdev->dev,
8603 "failed to delete address %pM from mac list\n",
8608 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8610 spin_unlock_bh(&vport->mac_list_lock);
8614 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8616 mac_node->state = state;
8617 ether_addr_copy(mac_node->mac_addr, addr);
8618 list_add_tail(&mac_node->node, list);
8620 spin_unlock_bh(&vport->mac_list_lock);
8625 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8626 const unsigned char *addr)
8628 struct hclge_vport *vport = hclge_get_vport(handle);
8630 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8634 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8635 const unsigned char *addr)
8637 struct hclge_dev *hdev = vport->back;
8638 struct hclge_mac_vlan_tbl_entry_cmd req;
8639 struct hclge_desc desc;
8640 u16 egress_port = 0;
8643 /* mac addr check */
8644 if (is_zero_ether_addr(addr) ||
8645 is_broadcast_ether_addr(addr) ||
8646 is_multicast_ether_addr(addr)) {
8647 dev_err(&hdev->pdev->dev,
8648 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
8649 addr, is_zero_ether_addr(addr),
8650 is_broadcast_ether_addr(addr),
8651 is_multicast_ether_addr(addr));
8655 memset(&req, 0, sizeof(req));
8657 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8658 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8660 req.egress_port = cpu_to_le16(egress_port);
8662 hclge_prepare_mac_addr(&req, addr, false);
8664 /* Look up the mac address in the mac_vlan table, and add
8665 * it if the entry does not exist. Duplicate unicast entries
8666 * are not allowed in the mac vlan table.
8668 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8669 if (ret == -ENOENT) {
8670 mutex_lock(&hdev->vport_lock);
8671 if (!hclge_is_umv_space_full(vport, false)) {
8672 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8674 hclge_update_umv_space(vport, false);
8675 mutex_unlock(&hdev->vport_lock);
8678 mutex_unlock(&hdev->vport_lock);
8680 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8681 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8682 hdev->priv_umv_size);
8687 /* check if we just hit the duplicate */
8689 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
8690 vport->vport_id, addr);
8694 dev_err(&hdev->pdev->dev,
8695 "PF failed to add unicast entry(%pM) in the MAC table\n",
8701 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8702 const unsigned char *addr)
8704 struct hclge_vport *vport = hclge_get_vport(handle);
8706 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8710 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8711 const unsigned char *addr)
8713 struct hclge_dev *hdev = vport->back;
8714 struct hclge_mac_vlan_tbl_entry_cmd req;
8717 /* mac addr check */
8718 if (is_zero_ether_addr(addr) ||
8719 is_broadcast_ether_addr(addr) ||
8720 is_multicast_ether_addr(addr)) {
8721 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
8726 memset(&req, 0, sizeof(req));
8727 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8728 hclge_prepare_mac_addr(&req, addr, false);
8729 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8731 mutex_lock(&hdev->vport_lock);
8732 hclge_update_umv_space(vport, true);
8733 mutex_unlock(&hdev->vport_lock);
8734 } else if (ret == -ENOENT) {
8741 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8742 const unsigned char *addr)
8744 struct hclge_vport *vport = hclge_get_vport(handle);
8746 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8750 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8751 const unsigned char *addr)
8753 struct hclge_dev *hdev = vport->back;
8754 struct hclge_mac_vlan_tbl_entry_cmd req;
8755 struct hclge_desc desc[3];
8758 /* mac addr check */
8759 if (!is_multicast_ether_addr(addr)) {
8760 dev_err(&hdev->pdev->dev,
8761 "Add mc mac err! invalid mac:%pM.\n",
8765 memset(&req, 0, sizeof(req));
8766 hclge_prepare_mac_addr(&req, addr, true);
8767 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8769 /* This mac addr does not exist, add a new entry for it */
8770 memset(desc[0].data, 0, sizeof(desc[0].data));
8771 memset(desc[1].data, 0, sizeof(desc[0].data));
8772 memset(desc[2].data, 0, sizeof(desc[0].data));
8774 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8777 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8778 /* if the table has already overflowed, do not print every time */
8779 if (status == -ENOSPC &&
8780 !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
8781 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8786 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8787 const unsigned char *addr)
8789 struct hclge_vport *vport = hclge_get_vport(handle);
8791 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8795 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8796 const unsigned char *addr)
8798 struct hclge_dev *hdev = vport->back;
8799 struct hclge_mac_vlan_tbl_entry_cmd req;
8800 enum hclge_cmd_status status;
8801 struct hclge_desc desc[3];
8803 /* mac addr check */
8804 if (!is_multicast_ether_addr(addr)) {
8805 dev_dbg(&hdev->pdev->dev,
8806 "Remove mc mac err! invalid mac:%pM.\n",
8811 memset(&req, 0, sizeof(req));
8812 hclge_prepare_mac_addr(&req, addr, true);
8813 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8815 /* This mac addr exists, remove this handle's VFID from it */
8816 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8820 if (hclge_is_all_function_id_zero(desc))
8821 /* All the vfids are zero, so delete this entry */
8822 status = hclge_remove_mac_vlan_tbl(vport, &req);
8824 /* Not all the vfids are zero, just update the vfid bitmap */
8825 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8826 } else if (status == -ENOENT) {
8833 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8834 struct list_head *list,
8835 int (*sync)(struct hclge_vport *,
8836 const unsigned char *))
8838 struct hclge_mac_node *mac_node, *tmp;
8841 list_for_each_entry_safe(mac_node, tmp, list, node) {
8842 ret = sync(vport, mac_node->mac_addr);
8844 mac_node->state = HCLGE_MAC_ACTIVE;
8846 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8853 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8854 struct list_head *list,
8855 int (*unsync)(struct hclge_vport *,
8856 const unsigned char *))
8858 struct hclge_mac_node *mac_node, *tmp;
8861 list_for_each_entry_safe(mac_node, tmp, list, node) {
8862 ret = unsync(vport, mac_node->mac_addr);
8863 if (!ret || ret == -ENOENT) {
8864 list_del(&mac_node->node);
8867 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8874 static bool hclge_sync_from_add_list(struct list_head *add_list,
8875 struct list_head *mac_list)
8877 struct hclge_mac_node *mac_node, *tmp, *new_node;
8878 bool all_added = true;
8880 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8881 if (mac_node->state == HCLGE_MAC_TO_ADD)
8884 /* if the mac address from tmp_add_list is not in the
8885 * uc/mc_mac_list, it means a TO_DEL request was received
8886 * during the time window of adding the mac address into the
8887 * mac table. If the mac_node state is ACTIVE, change it to
8888 * TO_DEL so it will be removed next time; otherwise it must be
8889 * TO_ADD, which means this address hasn't been added into the
8890 * mac table yet, so just remove the mac node.
8892 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8894 hclge_update_mac_node(new_node, mac_node->state);
8895 list_del(&mac_node->node);
8897 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8898 mac_node->state = HCLGE_MAC_TO_DEL;
8899 list_move_tail(&mac_node->node, mac_list);
8901 list_del(&mac_node->node);
8909 static void hclge_sync_from_del_list(struct list_head *del_list,
8910 struct list_head *mac_list)
8912 struct hclge_mac_node *mac_node, *tmp, *new_node;
8914 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8915 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8917 /* If the mac addr exists in the mac list, it means
8918 * a new TO_ADD request was received during the time window
8919 * of configuring the mac address. Since the mac node
8920 * state is TO_ADD, and the address is already in the
8921 * hardware (because the delete failed), we just need
8922 * to change the mac node state to ACTIVE.
8924 new_node->state = HCLGE_MAC_ACTIVE;
8925 list_del(&mac_node->node);
8928 list_move_tail(&mac_node->node, mac_list);
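/* Record whether the last sync programmed every address of this type: when
 * it did not, the corresponding overflow flag (HNAE3_OVERFLOW_UPE/MPE) is
 * set. As seen in hclge_add_uc_addr_common() above, the flag suppresses
 * repeated "table full" messages, and it is presumably also used to fall
 * back to promiscuous mode so traffic to unprogrammed addresses is not lost.
 */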
8933 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8934 enum HCLGE_MAC_ADDR_TYPE mac_type,
8937 if (mac_type == HCLGE_MAC_ADDR_UC) {
8939 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8941 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8944 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8946 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
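/* Sync one vport's mac list of the given type to hardware in three steps:
 * 1) under mac_list_lock, move TO_DEL nodes to tmp_del_list and copy
 *    TO_ADD nodes to tmp_add_list;
 * 2) outside the lock, delete and then add the entries in hardware;
 * 3) re-acquire the lock and merge the temporary lists back, keeping
 *    failed entries so they are retried on the next sync.
 */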
8950 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8951 enum HCLGE_MAC_ADDR_TYPE mac_type)
8953 struct hclge_mac_node *mac_node, *tmp, *new_node;
8954 struct list_head tmp_add_list, tmp_del_list;
8955 struct list_head *list;
8958 INIT_LIST_HEAD(&tmp_add_list);
8959 INIT_LIST_HEAD(&tmp_del_list);
8961 /* move the mac addrs to the tmp_add_list and tmp_del_list, then
8962 * we can add/delete these mac addrs outside the spin lock
8964 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8965 &vport->uc_mac_list : &vport->mc_mac_list;
8967 spin_lock_bh(&vport->mac_list_lock);
8969 list_for_each_entry_safe(mac_node, tmp, list, node) {
8970 switch (mac_node->state) {
8971 case HCLGE_MAC_TO_DEL:
8972 list_move_tail(&mac_node->node, &tmp_del_list);
8974 case HCLGE_MAC_TO_ADD:
8975 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8978 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8979 new_node->state = mac_node->state;
8980 list_add_tail(&new_node->node, &tmp_add_list);
8988 spin_unlock_bh(&vport->mac_list_lock);
8990 /* delete first, in order to get max mac table space for adding */
8991 if (mac_type == HCLGE_MAC_ADDR_UC) {
8992 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8993 hclge_rm_uc_addr_common);
8994 hclge_sync_vport_mac_list(vport, &tmp_add_list,
8995 hclge_add_uc_addr_common);
8997 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
8998 hclge_rm_mc_addr_common);
8999 hclge_sync_vport_mac_list(vport, &tmp_add_list,
9000 hclge_add_mc_addr_common);
9003 /* if some mac addresses failed to be added/deleted, move them back
9004 * to the mac_list and retry next time.
9006 spin_lock_bh(&vport->mac_list_lock);
9008 hclge_sync_from_del_list(&tmp_del_list, list);
9009 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
9011 spin_unlock_bh(&vport->mac_list_lock);
9013 hclge_update_overflow_flags(vport, mac_type, all_added);
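/* hclge_need_sync_mac_table() gates the sync below: a vport whose
 * configuration is blocked (vport_config_block, set e.g. while its VF is
 * being reset, see hclge_rm_vport_all_mac_table()) is skipped; otherwise
 * the MAC_TBL_CHANGE flag is tested and cleared to decide whether a sync
 * is due.
 */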
9016 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
9018 struct hclge_dev *hdev = vport->back;
9020 if (test_bit(vport->vport_id, hdev->vport_config_block))
9023 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
9029 static void hclge_sync_mac_table(struct hclge_dev *hdev)
9033 for (i = 0; i < hdev->num_alloc_vport; i++) {
9034 struct hclge_vport *vport = &hdev->vport[i];
9036 if (!hclge_need_sync_mac_table(vport))
9039 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
9040 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
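/* Collect the nodes to remove from hardware: TO_DEL and ACTIVE entries are
 * moved to tmp_del_list, while TO_ADD entries (never written to hardware)
 * are simply dropped from the list.
 */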
9044 static void hclge_build_del_list(struct list_head *list,
9046 struct list_head *tmp_del_list)
9048 struct hclge_mac_node *mac_cfg, *tmp;
9050 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
9051 switch (mac_cfg->state) {
9052 case HCLGE_MAC_TO_DEL:
9053 case HCLGE_MAC_ACTIVE:
9054 list_move_tail(&mac_cfg->node, tmp_del_list);
9056 case HCLGE_MAC_TO_ADD:
9058 list_del(&mac_cfg->node);
9066 static void hclge_unsync_del_list(struct hclge_vport *vport,
9067 int (*unsync)(struct hclge_vport *vport,
9068 const unsigned char *addr),
9070 struct list_head *tmp_del_list)
9072 struct hclge_mac_node *mac_cfg, *tmp;
9075 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
9076 ret = unsync(vport, mac_cfg->mac_addr);
9077 if (!ret || ret == -ENOENT) {
9078 /* clear all mac addrs from hardware, but keep these
9079 * mac addrs in the mac list so they can be restored after
9080 * the vf reset finishes.
9083 mac_cfg->state == HCLGE_MAC_ACTIVE) {
9084 mac_cfg->state = HCLGE_MAC_TO_ADD;
9086 list_del(&mac_cfg->node);
9089 } else if (is_del_list) {
9090 mac_cfg->state = HCLGE_MAC_TO_DEL;
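/* Remove all mac addresses of the given type for a vport from hardware.
 * With is_del_list the entries are dropped from the software list as well;
 * otherwise they are kept with state TO_ADD so they can be re-programmed
 * later (per the comment above, after a VF reset), and the vport's
 * vport_config_block bit is set so the periodic sync skips it meanwhile.
 */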
9095 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
9096 enum HCLGE_MAC_ADDR_TYPE mac_type)
9098 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
9099 struct hclge_dev *hdev = vport->back;
9100 struct list_head tmp_del_list, *list;
9102 if (mac_type == HCLGE_MAC_ADDR_UC) {
9103 list = &vport->uc_mac_list;
9104 unsync = hclge_rm_uc_addr_common;
9106 list = &vport->mc_mac_list;
9107 unsync = hclge_rm_mc_addr_common;
9110 INIT_LIST_HEAD(&tmp_del_list);
9113 set_bit(vport->vport_id, hdev->vport_config_block);
9115 spin_lock_bh(&vport->mac_list_lock);
9117 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9119 spin_unlock_bh(&vport->mac_list_lock);
9121 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9123 spin_lock_bh(&vport->mac_list_lock);
9125 hclge_sync_from_del_list(&tmp_del_list, list);
9127 spin_unlock_bh(&vport->mac_list_lock);
9130 /* remove all mac addresses when uninitializing */
9131 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9132 enum HCLGE_MAC_ADDR_TYPE mac_type)
9134 struct hclge_mac_node *mac_node, *tmp;
9135 struct hclge_dev *hdev = vport->back;
9136 struct list_head tmp_del_list, *list;
9138 INIT_LIST_HEAD(&tmp_del_list);
9140 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9141 &vport->uc_mac_list : &vport->mc_mac_list;
9143 spin_lock_bh(&vport->mac_list_lock);
9145 list_for_each_entry_safe(mac_node, tmp, list, node) {
9146 switch (mac_node->state) {
9147 case HCLGE_MAC_TO_DEL:
9148 case HCLGE_MAC_ACTIVE:
9149 list_move_tail(&mac_node->node, &tmp_del_list);
9151 case HCLGE_MAC_TO_ADD:
9152 list_del(&mac_node->node);
9158 spin_unlock_bh(&vport->mac_list_lock);
9160 if (mac_type == HCLGE_MAC_ADDR_UC)
9161 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9162 hclge_rm_uc_addr_common);
9164 hclge_unsync_vport_mac_list(vport, &tmp_del_list,
9165 hclge_rm_mc_addr_common);
9167 if (!list_empty(&tmp_del_list))
9168 dev_warn(&hdev->pdev->dev,
9169 "uninit %s mac list for vport %u not completely.\n",
9170 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9173 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9174 list_del(&mac_node->node);
9179 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9181 struct hclge_vport *vport;
9184 for (i = 0; i < hdev->num_alloc_vport; i++) {
9185 vport = &hdev->vport[i];
9186 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9187 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
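/* Translate the firmware response of the MAC ethertype command into an
 * errno: SUCCESS/ALREADY_ADD are treated as success, while table overflow,
 * key conflict and unknown codes are reported and mapped to -EIO.
 */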
9191 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9192 u16 cmdq_resp, u8 resp_code)
9194 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9195 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9196 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9197 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9202 dev_err(&hdev->pdev->dev,
9203 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9208 switch (resp_code) {
9209 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9210 case HCLGE_ETHERTYPE_ALREADY_ADD:
9213 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9214 dev_err(&hdev->pdev->dev,
9215 "add mac ethertype failed for manager table overflow.\n");
9216 return_status = -EIO;
9218 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9219 dev_err(&hdev->pdev->dev,
9220 "add mac ethertype failed for key conflict.\n");
9221 return_status = -EIO;
9224 dev_err(&hdev->pdev->dev,
9225 "add mac ethertype failed for undefined, code=%u.\n",
9227 return_status = -EIO;
9230 return return_status;
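/* Check whether the given VF mac address is already in use, either as an
 * entry in the hardware MAC_VLAN table or as the configured mac
 * (vf_info.mac) of another VF.
 */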
9233 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
9236 struct hclge_mac_vlan_tbl_entry_cmd req;
9237 struct hclge_dev *hdev = vport->back;
9238 struct hclge_desc desc;
9239 u16 egress_port = 0;
9242 if (is_zero_ether_addr(mac_addr))
9245 memset(&req, 0, sizeof(req));
9246 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
9247 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
9248 req.egress_port = cpu_to_le16(egress_port);
9249 hclge_prepare_mac_addr(&req, mac_addr, false);
9251 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
9254 vf_idx += HCLGE_VF_VPORT_START_NUM;
9255 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++)
9257 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
9263 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9266 struct hclge_vport *vport = hclge_get_vport(handle);
9267 struct hclge_dev *hdev = vport->back;
9269 vport = hclge_get_vf_vport(hdev, vf);
9273 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9274 dev_info(&hdev->pdev->dev,
9275 "Specified MAC(=%pM) is same as before, no change committed!\n",
9280 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
9281 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
9286 ether_addr_copy(vport->vf_info.mac, mac_addr);
9288 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9289 dev_info(&hdev->pdev->dev,
9290 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
9292 return hclge_inform_reset_assert_to_vf(vport);
9295 dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
9300 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9301 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9303 struct hclge_desc desc;
9308 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9309 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9311 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9313 dev_err(&hdev->pdev->dev,
9314 "add mac ethertype failed for cmd_send, ret =%d.\n",
9319 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9320 retval = le16_to_cpu(desc.retval);
9322 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9325 static int init_mgr_tbl(struct hclge_dev *hdev)
9330 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9331 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9333 dev_err(&hdev->pdev->dev,
9334 "add mac ethertype failed, ret =%d.\n",
9343 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9345 struct hclge_vport *vport = hclge_get_vport(handle);
9346 struct hclge_dev *hdev = vport->back;
9348 ether_addr_copy(p, hdev->hw.mac.mac_addr);
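/* Update the unicast mac list when the device's own mac address changes:
 * make sure new_addr is present (and at the head of the list, see the
 * comment below), and mark old_addr as TO_DEL unless it was never written
 * to hardware (state TO_ADD), in which case its node is simply removed.
 */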
9351 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9352 const u8 *old_addr, const u8 *new_addr)
9354 struct list_head *list = &vport->uc_mac_list;
9355 struct hclge_mac_node *old_node, *new_node;
9357 new_node = hclge_find_mac_node(list, new_addr);
9359 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9363 new_node->state = HCLGE_MAC_TO_ADD;
9364 ether_addr_copy(new_node->mac_addr, new_addr);
9365 list_add(&new_node->node, list);
9367 if (new_node->state == HCLGE_MAC_TO_DEL)
9368 new_node->state = HCLGE_MAC_ACTIVE;
9370 /* make sure the new addr is at the list head, to avoid the dev
9371 * addr not being re-added into the mac table due to the umv space
9372 * limitation after a global/imp reset, which clears the mac
9373 * table in hardware.
9375 list_move(&new_node->node, list);
9378 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9379 old_node = hclge_find_mac_node(list, old_addr);
9381 if (old_node->state == HCLGE_MAC_TO_ADD) {
9382 list_del(&old_node->node);
9385 old_node->state = HCLGE_MAC_TO_DEL;
9390 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
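/* Change the PF's own mac address: first program the new address as the
 * pause address, then update the unicast mac list under mac_list_lock and
 * let the scheduled service task write it to the MAC_VLAN table; if the
 * list update fails, the pause address is rolled back to the old one.
 */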
9395 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
9398 const unsigned char *new_addr = (const unsigned char *)p;
9399 struct hclge_vport *vport = hclge_get_vport(handle);
9400 struct hclge_dev *hdev = vport->back;
9401 unsigned char *old_addr = NULL;
9404 /* mac addr check */
9405 if (is_zero_ether_addr(new_addr) ||
9406 is_broadcast_ether_addr(new_addr) ||
9407 is_multicast_ether_addr(new_addr)) {
9408 dev_err(&hdev->pdev->dev,
9409 "change uc mac err! invalid mac: %pM.\n",
9414 ret = hclge_pause_addr_cfg(hdev, new_addr);
9416 dev_err(&hdev->pdev->dev,
9417 "failed to configure mac pause address, ret = %d\n",
9423 old_addr = hdev->hw.mac.mac_addr;
9425 spin_lock_bh(&vport->mac_list_lock);
9426 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9428 dev_err(&hdev->pdev->dev,
9429 "failed to change the mac addr:%pM, ret = %d\n",
9431 spin_unlock_bh(&vport->mac_list_lock);
9434 hclge_pause_addr_cfg(hdev, old_addr);
9438 /* we must update the dev addr under the spin lock to prevent the dev
9439 * addr from being removed by the set_rx_mode path.
9441 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9442 spin_unlock_bh(&vport->mac_list_lock);
9444 hclge_task_schedule(hdev, 0);
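/* MII ioctl handling for the case where the PHY is managed by firmware
 * (IMP): PHY registers are read and written through hclge_read_phy_reg()
 * and hclge_write_phy_reg() instead of a phydev attached to the MAC.
 */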
9449 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9451 struct mii_ioctl_data *data = if_mii(ifr);
9453 if (!hnae3_dev_phy_imp_supported(hdev))
9458 data->phy_id = hdev->hw.mac.phy_addr;
9459 /* this command reads phy id and register at the same time */
9462 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9466 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9472 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9475 struct hclge_vport *vport = hclge_get_vport(handle);
9476 struct hclge_dev *hdev = vport->back;
9480 return hclge_ptp_get_cfg(hdev, ifr);
9482 return hclge_ptp_set_cfg(hdev, ifr);
9484 if (!hdev->hw.mac.phydev)
9485 return hclge_mii_ioctl(hdev, ifr, cmd);
9488 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9491 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9494 struct hclge_port_vlan_filter_bypass_cmd *req;
9495 struct hclge_desc desc;
9498 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9499 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9501 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9504 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9506 dev_err(&hdev->pdev->dev,
9507 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9513 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9514 u8 fe_type, bool filter_en, u8 vf_id)
9516 struct hclge_vlan_filter_ctrl_cmd *req;
9517 struct hclge_desc desc;
9520 /* read current vlan filter parameter */
9521 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9522 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9523 req->vlan_type = vlan_type;
9526 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9528 dev_err(&hdev->pdev->dev,
9529 "failed to get vlan filter config, ret = %d.\n", ret);
9533 /* modify and write new config parameter */
9534 hclge_cmd_reuse_desc(&desc, false);
9535 req->vlan_fe = filter_en ?
9536 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9538 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9540 dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
9546 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9548 struct hclge_dev *hdev = vport->back;
9549 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9552 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9553 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9554 HCLGE_FILTER_FE_EGRESS_V1_B,
9555 enable, vport->vport_id);
9557 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9558 HCLGE_FILTER_FE_EGRESS, enable,
9563 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9564 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9566 } else if (!vport->vport_id) {
9567 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9570 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9571 HCLGE_FILTER_FE_INGRESS,
9578 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9580 struct hnae3_handle *handle = &vport->nic;
9581 struct hclge_vport_vlan_cfg *vlan, *tmp;
9582 struct hclge_dev *hdev = vport->back;
9584 if (vport->vport_id) {
9585 if (vport->port_base_vlan_cfg.state !=
9586 HNAE3_PORT_BASE_VLAN_DISABLE)
9589 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9591 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9595 if (!vport->req_vlan_fltr_en)
9598 /* for compatibility with older devices, always enable the vlan filter */
9599 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9602 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9603 if (vlan->vlan_id != 0)
9609 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9611 struct hclge_dev *hdev = vport->back;
9615 mutex_lock(&hdev->vport_lock);
9617 vport->req_vlan_fltr_en = request_en;
9619 need_en = hclge_need_enable_vport_vlan_filter(vport);
9620 if (need_en == vport->cur_vlan_fltr_en) {
9621 mutex_unlock(&hdev->vport_lock);
9625 ret = hclge_set_vport_vlan_filter(vport, need_en);
9627 mutex_unlock(&hdev->vport_lock);
9631 vport->cur_vlan_fltr_en = need_en;
9633 mutex_unlock(&hdev->vport_lock);
9638 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9640 struct hclge_vport *vport = hclge_get_vport(handle);
9642 return hclge_enable_vport_vlan_filter(vport, enable);
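/* Program one vlan id into the per-VF vlan filter. The command takes two
 * descriptors: the target VF is selected by a bitmap (one bit per VF,
 * vfid / 8 bytes in), which lives in desc[0] for the first
 * HCLGE_MAX_VF_BYTES bytes and overflows into desc[1] beyond that.
 */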
9645 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9646 bool is_kill, u16 vlan,
9647 struct hclge_desc *desc)
9649 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9650 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9655 hclge_cmd_setup_basic_desc(&desc[0],
9656 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9657 hclge_cmd_setup_basic_desc(&desc[1],
9658 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9660 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
9662 vf_byte_off = vfid / 8;
9663 vf_byte_val = 1 << (vfid % 8);
9665 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9666 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9668 req0->vlan_id = cpu_to_le16(vlan);
9669 req0->vlan_cfg = is_kill;
9671 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9672 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9674 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9676 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9678 dev_err(&hdev->pdev->dev,
9679 "Send vf vlan command fail, ret =%d.\n",
9687 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9688 bool is_kill, struct hclge_desc *desc)
9690 struct hclge_vlan_filter_vf_cfg_cmd *req;
9692 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9695 #define HCLGE_VF_VLAN_NO_ENTRY 2
9696 if (!req->resp_code || req->resp_code == 1)
9699 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9700 set_bit(vfid, hdev->vf_vlan_full);
9701 dev_warn(&hdev->pdev->dev,
9702 "vf vlan table is full, vf vlan filter is disabled\n");
9706 dev_err(&hdev->pdev->dev,
9707 "Add vf vlan filter fail, ret =%u.\n",
9710 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9711 if (!req->resp_code)
9714 /* the vf vlan filter is disabled when the vf vlan table is full,
9715 * so a new vlan id will not be added into the vf vlan table.
9716 * Just return 0 without a warning, to avoid massive verbose
9717 * logs when unloading.
9719 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9722 dev_err(&hdev->pdev->dev,
9723 "Kill vf vlan filter fail, ret =%u.\n",
9730 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9731 bool is_kill, u16 vlan)
9733 struct hclge_vport *vport = &hdev->vport[vfid];
9734 struct hclge_desc desc[2];
9737 /* if the vf vlan table is full, firmware will close the vf vlan
9738 * filter, so it is neither possible nor necessary to add a new vlan
9739 * id to it. If spoof check is enabled and the vf vlan table is full,
9740 * a new vlan shouldn't be added, because tx packets with this vlan id would be dropped.
9742 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9743 if (vport->vf_info.spoofchk && vlan) {
9744 dev_err(&hdev->pdev->dev,
9745 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9751 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9755 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
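/* Program one vlan id into the port vlan filter. The vlan id space is
 * addressed as a bitmap split into blocks of HCLGE_VLAN_ID_OFFSET_STEP
 * vlans: vlan_offset selects the block and vlan_offset_bitmap the bit
 * inside it, with vlan_cfg indicating add (0) or kill (1).
 */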
9758 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9759 u16 vlan_id, bool is_kill)
9761 struct hclge_vlan_filter_pf_cfg_cmd *req;
9762 struct hclge_desc desc;
9763 u8 vlan_offset_byte_val;
9764 u8 vlan_offset_byte;
9768 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9770 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9771 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9772 HCLGE_VLAN_BYTE_SIZE;
9773 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9775 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9776 req->vlan_offset = vlan_offset_160;
9777 req->vlan_cfg = is_kill;
9778 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9780 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9782 dev_err(&hdev->pdev->dev,
9783 "port vlan command, send fail, ret =%d.\n", ret);
9787 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9788 u16 vport_id, u16 vlan_id,
9791 u16 vport_idx, vport_num = 0;
9794 if (is_kill && !vlan_id)
9797 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9799 dev_err(&hdev->pdev->dev,
9800 "Set %u vport vlan filter config fail, ret =%d.\n",
9805 /* vlan 0 may be added twice when 8021q module is enabled */
9806 if (!is_kill && !vlan_id &&
9807 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9810 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9811 dev_err(&hdev->pdev->dev,
9812 "Add port vlan failed, vport %u is already in vlan %u\n",
9818 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9819 dev_err(&hdev->pdev->dev,
9820 "Delete port vlan failed, vport %u is not in vlan %u\n",
9825 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9828 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9829 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9835 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9837 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9838 struct hclge_vport_vtag_tx_cfg_cmd *req;
9839 struct hclge_dev *hdev = vport->back;
9840 struct hclge_desc desc;
9844 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9846 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9847 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9848 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9849 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9850 vcfg->accept_tag1 ? 1 : 0);
9851 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9852 vcfg->accept_untag1 ? 1 : 0);
9853 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9854 vcfg->accept_tag2 ? 1 : 0);
9855 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9856 vcfg->accept_untag2 ? 1 : 0);
9857 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9858 vcfg->insert_tag1_en ? 1 : 0);
9859 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9860 vcfg->insert_tag2_en ? 1 : 0);
9861 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9862 vcfg->tag_shift_mode_en ? 1 : 0);
9863 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9865 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9866 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9867 HCLGE_VF_NUM_PER_BYTE;
9868 req->vf_bitmap[bmap_index] =
9869 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9871 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9873 dev_err(&hdev->pdev->dev,
9874 "Send port txvlan cfg command fail, ret =%d\n",
9880 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9882 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9883 struct hclge_vport_vtag_rx_cfg_cmd *req;
9884 struct hclge_dev *hdev = vport->back;
9885 struct hclge_desc desc;
9889 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9891 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9892 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9893 vcfg->strip_tag1_en ? 1 : 0);
9894 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9895 vcfg->strip_tag2_en ? 1 : 0);
9896 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9897 vcfg->vlan1_vlan_prionly ? 1 : 0);
9898 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9899 vcfg->vlan2_vlan_prionly ? 1 : 0);
9900 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9901 vcfg->strip_tag1_discard_en ? 1 : 0);
9902 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9903 vcfg->strip_tag2_discard_en ? 1 : 0);
9905 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9906 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9907 HCLGE_VF_NUM_PER_BYTE;
9908 req->vf_bitmap[bmap_index] =
9909 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9911 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9913 dev_err(&hdev->pdev->dev,
9914 "Send port rxvlan cfg command fail, ret =%d\n",
9920 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9921 u16 port_base_vlan_state,
9922 u16 vlan_tag, u8 qos)
9926 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9927 vport->txvlan_cfg.accept_tag1 = true;
9928 vport->txvlan_cfg.insert_tag1_en = false;
9929 vport->txvlan_cfg.default_tag1 = 0;
9931 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9933 vport->txvlan_cfg.accept_tag1 =
9934 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9935 vport->txvlan_cfg.insert_tag1_en = true;
9936 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9940 vport->txvlan_cfg.accept_untag1 = true;
9942 /* accept_tag2 and accept_untag2 are not supported on
9943 * pdev revision 0x20; newer revisions support them, but
9944 * these two fields cannot be configured by the user.
9946 vport->txvlan_cfg.accept_tag2 = true;
9947 vport->txvlan_cfg.accept_untag2 = true;
9948 vport->txvlan_cfg.insert_tag2_en = false;
9949 vport->txvlan_cfg.default_tag2 = 0;
9950 vport->txvlan_cfg.tag_shift_mode_en = true;
9952 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9953 vport->rxvlan_cfg.strip_tag1_en = false;
9954 vport->rxvlan_cfg.strip_tag2_en =
9955 vport->rxvlan_cfg.rx_vlan_offload_en;
9956 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9958 vport->rxvlan_cfg.strip_tag1_en =
9959 vport->rxvlan_cfg.rx_vlan_offload_en;
9960 vport->rxvlan_cfg.strip_tag2_en = true;
9961 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9964 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9965 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9966 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9968 ret = hclge_set_vlan_tx_offload_cfg(vport);
9972 return hclge_set_vlan_rx_offload_cfg(vport);
9975 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9977 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9978 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9979 struct hclge_desc desc;
9982 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9983 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9984 rx_req->ot_fst_vlan_type =
9985 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9986 rx_req->ot_sec_vlan_type =
9987 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9988 rx_req->in_fst_vlan_type =
9989 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9990 rx_req->in_sec_vlan_type =
9991 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9993 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9995 dev_err(&hdev->pdev->dev,
9996 "Send rxvlan protocol type command fail, ret =%d\n",
10001 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
10003 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
10004 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
10005 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
10007 status = hclge_cmd_send(&hdev->hw, &desc, 1);
10009 dev_err(&hdev->pdev->dev,
10010 "Send txvlan protocol type command fail, ret =%d\n",
10016 static int hclge_init_vlan_config(struct hclge_dev *hdev)
10018 #define HCLGE_DEF_VLAN_TYPE 0x8100
10020 struct hnae3_handle *handle = &hdev->vport[0].nic;
10021 struct hclge_vport *vport;
10025 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
10026 /* for revision 0x21, vf vlan filter is per function */
10027 for (i = 0; i < hdev->num_alloc_vport; i++) {
10028 vport = &hdev->vport[i];
10029 ret = hclge_set_vlan_filter_ctrl(hdev,
10030 HCLGE_FILTER_TYPE_VF,
10031 HCLGE_FILTER_FE_EGRESS,
10036 vport->cur_vlan_fltr_en = true;
10039 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
10040 HCLGE_FILTER_FE_INGRESS, true,
10045 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
10046 HCLGE_FILTER_FE_EGRESS_V1_B,
10052 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10053 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10054 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
10055 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
10056 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
10057 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
10059 ret = hclge_set_vlan_protocol_type(hdev);
10063 for (i = 0; i < hdev->num_alloc_vport; i++) {
10067 vport = &hdev->vport[i];
10068 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10069 qos = vport->port_base_vlan_cfg.vlan_info.qos;
10071 ret = hclge_vlan_offload_cfg(vport,
10072 vport->port_base_vlan_cfg.state,
10078 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
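/* The vport keeps a software shadow of its vlans in vport->vlan_list;
 * hd_tbl_status records whether each entry has actually been written to
 * the hardware vlan filter, so entries can be replayed (restored) or
 * removed consistently later.
 */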
10081 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10082 bool writen_to_tbl)
10084 struct hclge_vport_vlan_cfg *vlan, *tmp;
10086 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
10087 if (vlan->vlan_id == vlan_id)
10090 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
10094 vlan->hd_tbl_status = writen_to_tbl;
10095 vlan->vlan_id = vlan_id;
10097 list_add_tail(&vlan->node, &vport->vlan_list);
10100 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
10102 struct hclge_vport_vlan_cfg *vlan, *tmp;
10103 struct hclge_dev *hdev = vport->back;
10106 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10107 if (!vlan->hd_tbl_status) {
10108 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10110 vlan->vlan_id, false);
10112 dev_err(&hdev->pdev->dev,
10113 "restore vport vlan list failed, ret=%d\n",
10118 vlan->hd_tbl_status = true;
10124 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10127 struct hclge_vport_vlan_cfg *vlan, *tmp;
10128 struct hclge_dev *hdev = vport->back;
10130 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10131 if (vlan->vlan_id == vlan_id) {
10132 if (is_write_tbl && vlan->hd_tbl_status)
10133 hclge_set_vlan_filter_hw(hdev,
10134 htons(ETH_P_8021Q),
10139 list_del(&vlan->node);
10146 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10148 struct hclge_vport_vlan_cfg *vlan, *tmp;
10149 struct hclge_dev *hdev = vport->back;
10151 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10152 if (vlan->hd_tbl_status)
10153 hclge_set_vlan_filter_hw(hdev,
10154 htons(ETH_P_8021Q),
10159 vlan->hd_tbl_status = false;
10161 list_del(&vlan->node);
10165 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10168 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10170 struct hclge_vport_vlan_cfg *vlan, *tmp;
10171 struct hclge_vport *vport;
10174 for (i = 0; i < hdev->num_alloc_vport; i++) {
10175 vport = &hdev->vport[i];
10176 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10177 list_del(&vlan->node);
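/* Restore a vport's vlan configuration to hardware after it has been
 * cleared (e.g. by reset): re-program the port base vlan if one is active,
 * then replay every entry of vport->vlan_list and mark it as written.
 */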
10183 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10185 struct hclge_vport_vlan_cfg *vlan, *tmp;
10186 struct hclge_dev *hdev = vport->back;
10192 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
10193 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
10194 state = vport->port_base_vlan_cfg.state;
10196 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10197 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10198 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10199 vport->vport_id, vlan_id,
10204 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10205 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10207 vlan->vlan_id, false);
10210 vlan->hd_tbl_status = true;
10214 /* For global reset and imp reset, hardware will clear the mac table,
10215 * so we change the mac address state from ACTIVE to TO_ADD, then it
10216 * can be restored by the service task after the reset completes. Further,
10217 * the mac addresses with state TO_DEL or DEL_FAIL do not need to
10218 * be restored after reset, so just remove these mac nodes from mac_list.
10220 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10222 struct hclge_mac_node *mac_node, *tmp;
10224 list_for_each_entry_safe(mac_node, tmp, list, node) {
10225 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10226 mac_node->state = HCLGE_MAC_TO_ADD;
10227 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10228 list_del(&mac_node->node);
10234 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10236 spin_lock_bh(&vport->mac_list_lock);
10238 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10239 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10240 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10242 spin_unlock_bh(&vport->mac_list_lock);
10245 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10247 struct hclge_vport *vport = &hdev->vport[0];
10248 struct hnae3_handle *handle = &vport->nic;
10250 hclge_restore_mac_table_common(vport);
10251 hclge_restore_vport_vlan_table(vport);
10252 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10253 hclge_restore_fd_entries(handle);
10256 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10258 struct hclge_vport *vport = hclge_get_vport(handle);
10260 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10261 vport->rxvlan_cfg.strip_tag1_en = false;
10262 vport->rxvlan_cfg.strip_tag2_en = enable;
10263 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10265 vport->rxvlan_cfg.strip_tag1_en = enable;
10266 vport->rxvlan_cfg.strip_tag2_en = true;
10267 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10270 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10271 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10272 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10273 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10275 return hclge_set_vlan_rx_offload_cfg(vport);
10278 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10280 struct hclge_dev *hdev = vport->back;
10282 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10283 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
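/* Switch the hardware vlan filter entries when the port base vlan state
 * changes: on enable, drop the per-vlan entries (and the forced vlan 0)
 * and install the new port base vlan; otherwise re-add vlan 0, remove the
 * old port base vlan and replay the vport vlan list.
 */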
10286 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10287 u16 port_base_vlan_state,
10288 struct hclge_vlan_info *new_info,
10289 struct hclge_vlan_info *old_info)
10291 struct hclge_dev *hdev = vport->back;
10294 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10295 hclge_rm_vport_all_vlan_table(vport, false);
10296 /* force clear VLAN 0 */
10297 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10300 return hclge_set_vlan_filter_hw(hdev,
10301 htons(new_info->vlan_proto),
10303 new_info->vlan_tag,
10307 /* force add VLAN 0 */
10308 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10312 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10313 vport->vport_id, old_info->vlan_tag,
10318 return hclge_add_vport_all_vlan_table(vport);
10321 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10322 const struct hclge_vlan_info *old_cfg)
10324 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10327 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10333 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10334 struct hclge_vlan_info *vlan_info)
10336 struct hnae3_handle *nic = &vport->nic;
10337 struct hclge_vlan_info *old_vlan_info;
10338 struct hclge_dev *hdev = vport->back;
10341 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10343 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10348 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10351 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
10352 /* add new VLAN tag */
10353 ret = hclge_set_vlan_filter_hw(hdev,
10354 htons(vlan_info->vlan_proto),
10356 vlan_info->vlan_tag,
10361 /* remove old VLAN tag */
10362 if (old_vlan_info->vlan_tag == 0)
10363 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10366 ret = hclge_set_vlan_filter_hw(hdev,
10367 htons(ETH_P_8021Q),
10369 old_vlan_info->vlan_tag,
10372 dev_err(&hdev->pdev->dev,
10373 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10374 vport->vport_id, old_vlan_info->vlan_tag, ret);
10381 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10387 vport->port_base_vlan_cfg.state = state;
10388 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10389 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10391 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10393 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10394 hclge_set_vport_vlan_fltr_change(vport);
10399 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10400 enum hnae3_port_base_vlan_state state,
10403 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10405 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10407 return HNAE3_PORT_BASE_VLAN_ENABLE;
10411 return HNAE3_PORT_BASE_VLAN_DISABLE;
10413 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10414 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10415 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10417 return HNAE3_PORT_BASE_VLAN_MODIFY;
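/* Set a port base vlan (vlan id + qos) for a VF from the PF side. Only
 * 802.1Q is accepted; the request is translated into a port-base-vlan
 * state change, and for devices older than V3 the new state is pushed to
 * the VF (hclge_push_vf_port_base_vlan_info()) when the VF is alive.
 */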
10420 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10421 u16 vlan, u8 qos, __be16 proto)
10423 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10424 struct hclge_vport *vport = hclge_get_vport(handle);
10425 struct hclge_dev *hdev = vport->back;
10426 struct hclge_vlan_info vlan_info;
10430 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10431 return -EOPNOTSUPP;
10433 vport = hclge_get_vf_vport(hdev, vfid);
10437 /* qos is a 3-bit value, so it cannot be bigger than 7 */
10438 if (vlan > VLAN_N_VID - 1 || qos > 7)
10440 if (proto != htons(ETH_P_8021Q))
10441 return -EPROTONOSUPPORT;
10443 state = hclge_get_port_base_vlan_state(vport,
10444 vport->port_base_vlan_cfg.state,
10446 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10449 vlan_info.vlan_tag = vlan;
10450 vlan_info.qos = qos;
10451 vlan_info.vlan_proto = ntohs(proto);
10453 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10455 dev_err(&hdev->pdev->dev,
10456 "failed to update port base vlan for vf %d, ret = %d\n",
10461 /* for DEVICE_VERSION_V3, the vf doesn't need to know about the port based vlan
10464 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10465 test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10466 hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10467 vport->vport_id, state,
10473 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10475 struct hclge_vlan_info *vlan_info;
10476 struct hclge_vport *vport;
10480 /* clear port base vlan for all vfs */
10481 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10482 vport = &hdev->vport[vf];
10483 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10485 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10487 vlan_info->vlan_tag, true);
10489 dev_err(&hdev->pdev->dev,
10490 "failed to clear vf vlan for vf%d, ret = %d\n",
10491 vf - HCLGE_VF_VPORT_START_NUM, ret);
10495 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10496 u16 vlan_id, bool is_kill)
10498 struct hclge_vport *vport = hclge_get_vport(handle);
10499 struct hclge_dev *hdev = vport->back;
10500 bool writen_to_tbl = false;
10503 /* When the device is resetting or reset has failed, firmware is unable
10504 * to handle the mailbox. Just record the vlan id, and remove it after
10507 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10508 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10509 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10513 /* when port base vlan is enabled, we use the port base vlan as the
10514 * vlan filter entry. In this case, we don't update the vlan filter
10515 * table when the user adds a new vlan or removes an existing vlan; we
10516 * just update the vport vlan list. The vlan ids in the vlan list will
10517 * not be written into the vlan filter table until port base vlan is disabled
10519 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10520 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10522 writen_to_tbl = true;
10527 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10529 hclge_add_vport_vlan_table(vport, vlan_id,
10531 } else if (is_kill) {
10532 /* when removing the hw vlan filter fails, record the vlan id
10533 * and try to remove it from hw later, to be consistent
10536 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10539 hclge_set_vport_vlan_fltr_change(vport);
10544 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10546 struct hclge_vport *vport;
10550 for (i = 0; i < hdev->num_alloc_vport; i++) {
10551 vport = &hdev->vport[i];
10552 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10556 ret = hclge_enable_vport_vlan_filter(vport,
10557 vport->req_vlan_fltr_en);
10559 dev_err(&hdev->pdev->dev,
10560 "failed to sync vlan filter state for vport%u, ret = %d\n",
10561 vport->vport_id, ret);
10562 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10569 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10571 #define HCLGE_MAX_SYNC_COUNT 60
10573 int i, ret, sync_cnt = 0;
10576 /* start from vport 1 for PF is always alive */
10577 for (i = 0; i < hdev->num_alloc_vport; i++) {
10578 struct hclge_vport *vport = &hdev->vport[i];
10580 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10582 while (vlan_id != VLAN_N_VID) {
10583 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10584 vport->vport_id, vlan_id,
10586 if (ret && ret != -EINVAL)
10589 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10590 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10591 hclge_set_vport_vlan_fltr_change(vport);
10594 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10597 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10602 hclge_sync_vlan_fltr_state(hdev);
10605 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10607 struct hclge_config_max_frm_size_cmd *req;
10608 struct hclge_desc desc;
10610 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10612 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10613 req->max_frm_size = cpu_to_le16(new_mps);
10614 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10616 return hclge_cmd_send(&hdev->hw, &desc, 1);
10619 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10621 struct hclge_vport *vport = hclge_get_vport(handle);
10623 return hclge_set_vport_mtu(vport, new_mtu);
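/* MTU handling: the requested MTU is converted into a max frame size
 * (headers, FCS and two vlan tags included) and checked against the device
 * limits. A VF's MPS must not exceed the PF's MPS; changing the PF's MPS
 * must not drop below any VF's MPS and triggers a buffer reallocation with
 * the client brought down around the update.
 */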
10626 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10628 struct hclge_dev *hdev = vport->back;
10629 int i, max_frm_size, ret;
10631 /* HW supports 2 layers of vlan */
10632 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10633 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10634 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10637 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10638 mutex_lock(&hdev->vport_lock);
10639 /* VF's mps must fit within hdev->mps */
10640 if (vport->vport_id && max_frm_size > hdev->mps) {
10641 mutex_unlock(&hdev->vport_lock);
10643 } else if (vport->vport_id) {
10644 vport->mps = max_frm_size;
10645 mutex_unlock(&hdev->vport_lock);
10649 /* PF's mps must be greater than the VFs' mps */
10650 for (i = 1; i < hdev->num_alloc_vport; i++)
10651 if (max_frm_size < hdev->vport[i].mps) {
10652 mutex_unlock(&hdev->vport_lock);
10656 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10658 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10660 dev_err(&hdev->pdev->dev,
10661 "Change mtu fail, ret =%d\n", ret);
10665 hdev->mps = max_frm_size;
10666 vport->mps = max_frm_size;
10668 ret = hclge_buffer_alloc(hdev);
10670 dev_err(&hdev->pdev->dev,
10671 "Allocate buffer fail, ret =%d\n", ret);
10674 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10675 mutex_unlock(&hdev->vport_lock);
10679 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10682 struct hclge_reset_tqp_queue_cmd *req;
10683 struct hclge_desc desc;
10686 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10688 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10689 req->tqp_id = cpu_to_le16(queue_id);
10691 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10693 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10695 dev_err(&hdev->pdev->dev,
10696 "Send tqp reset cmd error, status =%d\n", ret);
10703 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
10705 struct hclge_reset_tqp_queue_cmd *req;
10706 struct hclge_desc desc;
10709 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10711 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10712 req->tqp_id = cpu_to_le16(queue_id);
10714 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10716 dev_err(&hdev->pdev->dev,
10717 "Get reset status error, status =%d\n", ret);
10721 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10724 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10726 struct hnae3_queue *queue;
10727 struct hclge_tqp *tqp;
10729 queue = handle->kinfo.tqp[queue_id];
10730 tqp = container_of(queue, struct hclge_tqp, q);
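/* Reset every tqp of the handle one by one: assert the per-queue reset,
 * poll the ready status up to HCLGE_TQP_RESET_TRY_TIMES with a short sleep
 * between tries, then deassert the reset before moving on to the next
 * queue.
 */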
10735 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10737 struct hclge_vport *vport = hclge_get_vport(handle);
10738 struct hclge_dev *hdev = vport->back;
10739 u16 reset_try_times = 0;
10745 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10746 queue_gid = hclge_covert_handle_qid_global(handle, i);
10747 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10749 dev_err(&hdev->pdev->dev,
10750 "failed to send reset tqp cmd, ret = %d\n",
10755 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10756 reset_status = hclge_get_reset_status(hdev, queue_gid);
10760 /* Wait for tqp hw reset */
10761 usleep_range(1000, 1200);
10764 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10765 dev_err(&hdev->pdev->dev,
10766 "wait for tqp hw reset timeout\n");
10770 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10772 dev_err(&hdev->pdev->dev,
10773 "failed to deassert soft reset, ret = %d\n",
10777 reset_try_times = 0;
10782 static int hclge_reset_rcb(struct hnae3_handle *handle)
10784 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10785 #define HCLGE_RESET_RCB_SUCCESS 1U
10787 struct hclge_vport *vport = hclge_get_vport(handle);
10788 struct hclge_dev *hdev = vport->back;
10789 struct hclge_reset_cmd *req;
10790 struct hclge_desc desc;
10795 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10797 req = (struct hclge_reset_cmd *)desc.data;
10798 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10799 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10800 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10801 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10803 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10805 dev_err(&hdev->pdev->dev,
10806 "failed to send rcb reset cmd, ret = %d\n", ret);
10810 return_status = req->fun_reset_rcb_return_status;
10811 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10814 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10815 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10820 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10821 * again to reset all tqps
10823 return hclge_reset_tqp_cmd(handle);
10826 int hclge_reset_tqp(struct hnae3_handle *handle)
10828 struct hclge_vport *vport = hclge_get_vport(handle);
10829 struct hclge_dev *hdev = vport->back;
10832 /* only need to disable PF's tqp */
10833 if (!vport->vport_id) {
10834 ret = hclge_tqp_enable(handle, false);
10836 dev_err(&hdev->pdev->dev,
10837 "failed to disable tqp, ret = %d\n", ret);
10842 return hclge_reset_rcb(handle);
10845 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10847 struct hclge_vport *vport = hclge_get_vport(handle);
10848 struct hclge_dev *hdev = vport->back;
10850 return hdev->fw_version;
10853 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10855 struct phy_device *phydev = hdev->hw.mac.phydev;
10860 phy_set_asym_pause(phydev, rx_en, tx_en);
10863 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10867 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10870 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10872 dev_err(&hdev->pdev->dev,
10873 "configure pauseparam error, ret = %d.\n", ret);
10878 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10880 struct phy_device *phydev = hdev->hw.mac.phydev;
10881 u16 remote_advertising = 0;
10882 u16 local_advertising;
10883 u32 rx_pause, tx_pause;
10886 if (!phydev->link || !phydev->autoneg)
10889 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10892 remote_advertising = LPA_PAUSE_CAP;
10894 if (phydev->asym_pause)
10895 remote_advertising |= LPA_PAUSE_ASYM;
10897 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10898 remote_advertising);
10899 tx_pause = flowctl & FLOW_CTRL_TX;
10900 rx_pause = flowctl & FLOW_CTRL_RX;
10902 if (phydev->duplex == HCLGE_MAC_HALF) {
10907 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10910 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10911 u32 *rx_en, u32 *tx_en)
10913 struct hclge_vport *vport = hclge_get_vport(handle);
10914 struct hclge_dev *hdev = vport->back;
10915 u8 media_type = hdev->hw.mac.media_type;
10917 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10918 hclge_get_autoneg(handle) : 0;
10920 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10926 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10929 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10932 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10941 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10942 u32 rx_en, u32 tx_en)
10944 if (rx_en && tx_en)
10945 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10946 else if (rx_en && !tx_en)
10947 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10948 else if (!rx_en && tx_en)
10949 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10951 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10953 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10956 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10957 u32 rx_en, u32 tx_en)
10959 struct hclge_vport *vport = hclge_get_vport(handle);
10960 struct hclge_dev *hdev = vport->back;
10961 struct phy_device *phydev = hdev->hw.mac.phydev;
10964 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10965 fc_autoneg = hclge_get_autoneg(handle);
10966 if (auto_neg != fc_autoneg) {
10967 dev_info(&hdev->pdev->dev,
10968 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10969 return -EOPNOTSUPP;
10973 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10974 dev_info(&hdev->pdev->dev,
10975 "Priority flow control enabled. Cannot set link flow control.\n");
10976 return -EOPNOTSUPP;
10979 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10981 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10983 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10984 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10987 return phy_start_aneg(phydev);
10989 return -EOPNOTSUPP;
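/* Example usage (the interface name is a placeholder, not part of the driver):
 *   ethtool -A eth0 rx on tx off    -> hclge_set_pauseparam()
 *   ethtool -a eth0                 -> hclge_get_pauseparam()
 * Changing autoneg here is rejected; it has to go through
 * "ethtool -s <dev> autoneg <on|off>" as the message above says.
 */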
10992 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10993 u8 *auto_neg, u32 *speed, u8 *duplex)
10995 struct hclge_vport *vport = hclge_get_vport(handle);
10996 struct hclge_dev *hdev = vport->back;
10999 *speed = hdev->hw.mac.speed;
11001 *duplex = hdev->hw.mac.duplex;
11003 *auto_neg = hdev->hw.mac.autoneg;
11006 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11009 struct hclge_vport *vport = hclge_get_vport(handle);
11010 struct hclge_dev *hdev = vport->back;
11012 /* When the nic is down, the service task is not running and does not
11013 * update the port information every second. Query the port information
11014 * before returning the media type to ensure it is up to date.
11016 hclge_update_port_info(hdev);
11019 *media_type = hdev->hw.mac.media_type;
11022 *module_type = hdev->hw.mac.module_type;
11025 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11026 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11028 struct hclge_vport *vport = hclge_get_vport(handle);
11029 struct hclge_dev *hdev = vport->back;
11030 struct phy_device *phydev = hdev->hw.mac.phydev;
11031 int mdix_ctrl, mdix, is_resolved;
11032 unsigned int retval;
11035 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11036 *tp_mdix = ETH_TP_MDI_INVALID;
11040 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11042 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11043 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11044 HCLGE_PHY_MDIX_CTRL_S);
11046 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11047 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11048 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11050 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11052 switch (mdix_ctrl) {
11054 *tp_mdix_ctrl = ETH_TP_MDI;
11057 *tp_mdix_ctrl = ETH_TP_MDI_X;
11060 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11063 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11068 *tp_mdix = ETH_TP_MDI_INVALID;
11070 *tp_mdix = ETH_TP_MDI_X;
11072 *tp_mdix = ETH_TP_MDI;
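/* Note: the MDI-X query above temporarily switches the PHY to the MDI-X
 * register page, reads the control/status registers, then restores the
 * copper page. tp_mdix is only meaningful once the PHY has resolved
 * speed/duplex (HCLGE_PHY_SPEED_DUP_RESOLVE_B); otherwise it stays
 * ETH_TP_MDI_INVALID.
 */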
11075 static void hclge_info_show(struct hclge_dev *hdev)
11077 struct device *dev = &hdev->pdev->dev;
11079 dev_info(dev, "PF info begin:\n");
11081 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11082 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11083 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11084 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11085 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11086 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11087 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11088 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11089 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11090 dev_info(dev, "This is %s PF\n",
11091 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11092 dev_info(dev, "DCB %s\n",
11093 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11094 dev_info(dev, "MQPRIO %s\n",
11095 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11096 dev_info(dev, "Default tx spare buffer size: %u\n",
11097 hdev->tx_spare_buf_size);
11099 dev_info(dev, "PF info end.\n");
11102 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11103 struct hclge_vport *vport)
11105 struct hnae3_client *client = vport->nic.client;
11106 struct hclge_dev *hdev = ae_dev->priv;
11107 int rst_cnt = hdev->rst_stats.reset_cnt;
11110 ret = client->ops->init_instance(&vport->nic);
11114 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11115 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11116 rst_cnt != hdev->rst_stats.reset_cnt) {
11121 /* Enable nic hw error interrupts */
11122 ret = hclge_config_nic_hw_error(hdev, true);
11124 dev_err(&ae_dev->pdev->dev,
11125 "fail(%d) to enable hw error interrupts\n", ret);
11129 hnae3_set_client_init_flag(client, ae_dev, 1);
11131 if (netif_msg_drv(&hdev->vport->nic))
11132 hclge_info_show(hdev);
11137 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11138 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11139 msleep(HCLGE_WAIT_RESET_DONE);
11141 client->ops->uninit_instance(&vport->nic, 0);
11146 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11147 struct hclge_vport *vport)
11149 struct hclge_dev *hdev = ae_dev->priv;
11150 struct hnae3_client *client;
11154 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11158 client = hdev->roce_client;
11159 ret = hclge_init_roce_base_info(vport);
11163 rst_cnt = hdev->rst_stats.reset_cnt;
11164 ret = client->ops->init_instance(&vport->roce);
11168 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11169 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11170 rst_cnt != hdev->rst_stats.reset_cnt) {
11172 goto init_roce_err;
11175 /* Enable roce ras interrupts */
11176 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11178 dev_err(&ae_dev->pdev->dev,
11179 "fail(%d) to enable roce ras interrupts\n", ret);
11180 goto init_roce_err;
11183 hnae3_set_client_init_flag(client, ae_dev, 1);
11188 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11189 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11190 msleep(HCLGE_WAIT_RESET_DONE);
11192 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
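/* Both client init helpers above use the same reset-race guard: snapshot
 * rst_stats.reset_cnt before init_instance(), and if a reset is running
 * (or the counter changed) afterwards, roll back by waiting for the reset
 * to finish and uninitializing the instance again.
 */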
11197 static int hclge_init_client_instance(struct hnae3_client *client,
11198 struct hnae3_ae_dev *ae_dev)
11200 struct hclge_dev *hdev = ae_dev->priv;
11201 struct hclge_vport *vport = &hdev->vport[0];
11204 switch (client->type) {
11205 case HNAE3_CLIENT_KNIC:
11206 hdev->nic_client = client;
11207 vport->nic.client = client;
11208 ret = hclge_init_nic_client_instance(ae_dev, vport);
11212 ret = hclge_init_roce_client_instance(ae_dev, vport);
11217 case HNAE3_CLIENT_ROCE:
11218 if (hnae3_dev_roce_supported(hdev)) {
11219 hdev->roce_client = client;
11220 vport->roce.client = client;
11223 ret = hclge_init_roce_client_instance(ae_dev, vport);
11235 hdev->nic_client = NULL;
11236 vport->nic.client = NULL;
11239 hdev->roce_client = NULL;
11240 vport->roce.client = NULL;
11244 static void hclge_uninit_client_instance(struct hnae3_client *client,
11245 struct hnae3_ae_dev *ae_dev)
11247 struct hclge_dev *hdev = ae_dev->priv;
11248 struct hclge_vport *vport = &hdev->vport[0];
11250 if (hdev->roce_client) {
11251 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11252 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11253 msleep(HCLGE_WAIT_RESET_DONE);
11255 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11256 hdev->roce_client = NULL;
11257 vport->roce.client = NULL;
11259 if (client->type == HNAE3_CLIENT_ROCE)
11261 if (hdev->nic_client && client->ops->uninit_instance) {
11262 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11263 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11264 msleep(HCLGE_WAIT_RESET_DONE);
11266 client->ops->uninit_instance(&vport->nic, 0);
11267 hdev->nic_client = NULL;
11268 vport->nic.client = NULL;
11272 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11274 #define HCLGE_MEM_BAR 4
11276 struct pci_dev *pdev = hdev->pdev;
11277 struct hclge_hw *hw = &hdev->hw;
11279 /* for devices that do not have device memory, return directly */
11280 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11283 hw->mem_base = devm_ioremap_wc(&pdev->dev,
11284 pci_resource_start(pdev, HCLGE_MEM_BAR),
11285 pci_resource_len(pdev, HCLGE_MEM_BAR));
11286 if (!hw->mem_base) {
11287 dev_err(&pdev->dev, "failed to map device memory\n");
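/* The device-memory BAR (HCLGE_MEM_BAR, i.e. BAR 4) is optional: when
 * pci_select_bars() does not report it the function simply returns,
 * otherwise the region is mapped write-combined with devm_ioremap_wc()
 * so it is released automatically when the device is detached.
 */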
11294 static int hclge_pci_init(struct hclge_dev *hdev)
11296 struct pci_dev *pdev = hdev->pdev;
11297 struct hclge_hw *hw;
11300 ret = pci_enable_device(pdev);
11302 dev_err(&pdev->dev, "failed to enable PCI device\n");
11306 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11308 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11310 dev_err(&pdev->dev,
11311 "can't set consistent PCI DMA");
11312 goto err_disable_device;
11314 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11317 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11319 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11320 goto err_disable_device;
11323 pci_set_master(pdev);
11325 hw->io_base = pcim_iomap(pdev, 2, 0);
11326 if (!hw->io_base) {
11327 dev_err(&pdev->dev, "Can't map configuration register space\n");
11329 goto err_clr_master;
11332 ret = hclge_dev_mem_map(hdev);
11334 goto err_unmap_io_base;
11336 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11341 pcim_iounmap(pdev, hdev->hw.io_base);
11343 pci_clear_master(pdev);
11344 pci_release_regions(pdev);
11345 err_disable_device:
11346 pci_disable_device(pdev);
11351 static void hclge_pci_uninit(struct hclge_dev *hdev)
11353 struct pci_dev *pdev = hdev->pdev;
11355 if (hdev->hw.mem_base)
11356 devm_iounmap(&pdev->dev, hdev->hw.mem_base);
11358 pcim_iounmap(pdev, hdev->hw.io_base);
11359 pci_free_irq_vectors(pdev);
11360 pci_clear_master(pdev);
11361 pci_release_mem_regions(pdev);
11362 pci_disable_device(pdev);
11365 static void hclge_state_init(struct hclge_dev *hdev)
11367 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11368 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11369 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11370 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11371 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11372 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11373 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11376 static void hclge_state_uninit(struct hclge_dev *hdev)
11378 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11379 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11381 if (hdev->reset_timer.function)
11382 del_timer_sync(&hdev->reset_timer);
11383 if (hdev->service_task.work.func)
11384 cancel_delayed_work_sync(&hdev->service_task);
11387 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11388 enum hnae3_reset_type rst_type)
11390 #define HCLGE_RESET_RETRY_WAIT_MS 500
11391 #define HCLGE_RESET_RETRY_CNT 5
11393 struct hclge_dev *hdev = ae_dev->priv;
11398 down(&hdev->reset_sem);
11399 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11400 hdev->reset_type = rst_type;
11401 ret = hclge_reset_prepare(hdev);
11402 if (ret || hdev->reset_pending) {
11403 dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
11405 if (hdev->reset_pending ||
11406 retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11407 dev_err(&hdev->pdev->dev,
11408 "reset_pending:0x%lx, retry_cnt:%d\n",
11409 hdev->reset_pending, retry_cnt);
11410 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11411 up(&hdev->reset_sem);
11412 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11417 /* disable misc vector before reset done */
11418 hclge_enable_vector(&hdev->misc_vector, false);
11419 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
11421 if (hdev->reset_type == HNAE3_FLR_RESET)
11422 hdev->rst_stats.flr_rst_cnt++;
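/* If the prepare step above fails or a reset is still pending, the function
 * releases reset_sem, sleeps HCLGE_RESET_RETRY_WAIT_MS and retries while a
 * reset is pending or until HCLGE_RESET_RETRY_CNT attempts have been made.
 * Only after a successful prepare are the misc vector disabled and the
 * command queue marked disabled for the upcoming FLR/reset.
 */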
11425 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11427 struct hclge_dev *hdev = ae_dev->priv;
11430 hclge_enable_vector(&hdev->misc_vector, true);
11432 ret = hclge_reset_rebuild(hdev);
11434 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11436 hdev->reset_type = HNAE3_NONE_RESET;
11437 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11438 up(&hdev->reset_sem);
11441 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11445 for (i = 0; i < hdev->num_alloc_vport; i++) {
11446 struct hclge_vport *vport = &hdev->vport[i];
11449 /* Send cmd to clear VF's FUNC_RST_ING */
11450 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11452 dev_warn(&hdev->pdev->dev,
11453 "clear vf(%u) rst failed %d!\n",
11454 vport->vport_id, ret);
11458 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11460 struct hclge_desc desc;
11463 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11465 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11466 /* This new command is only supported by new firmware; it will
11467 * fail with older firmware. The error value -EOPNOTSUPP can only be
11468 * returned by older firmware running this command, so to keep the code
11469 * backward compatible we override this value and return success.
11472 if (ret && ret != -EOPNOTSUPP) {
11473 dev_err(&hdev->pdev->dev,
11474 "failed to clear hw resource, ret = %d\n", ret);
11480 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11482 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11483 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11486 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11488 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11489 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11492 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11494 struct pci_dev *pdev = ae_dev->pdev;
11495 struct hclge_dev *hdev;
11498 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11503 hdev->ae_dev = ae_dev;
11504 hdev->reset_type = HNAE3_NONE_RESET;
11505 hdev->reset_level = HNAE3_FUNC_RESET;
11506 ae_dev->priv = hdev;
11508 /* HW supports 2-layer VLAN */
11509 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11511 mutex_init(&hdev->vport_lock);
11512 spin_lock_init(&hdev->fd_rule_lock);
11513 sema_init(&hdev->reset_sem, 1);
11515 ret = hclge_pci_init(hdev);
11519 ret = hclge_devlink_init(hdev);
11521 goto err_pci_uninit;
11523 /* Firmware command queue initialization */
11524 ret = hclge_cmd_queue_init(hdev);
11526 goto err_devlink_uninit;
11528 /* Firmware command initialization */
11529 ret = hclge_cmd_init(hdev);
11531 goto err_cmd_uninit;
11533 ret = hclge_clear_hw_resource(hdev);
11535 goto err_cmd_uninit;
11537 ret = hclge_get_cap(hdev);
11539 goto err_cmd_uninit;
11541 ret = hclge_query_dev_specs(hdev);
11543 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11545 goto err_cmd_uninit;
11548 ret = hclge_configure(hdev);
11550 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11551 goto err_cmd_uninit;
11554 ret = hclge_init_msi(hdev);
11556 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11557 goto err_cmd_uninit;
11560 ret = hclge_misc_irq_init(hdev);
11562 goto err_msi_uninit;
11564 ret = hclge_alloc_tqps(hdev);
11566 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11567 goto err_msi_irq_uninit;
11570 ret = hclge_alloc_vport(hdev);
11572 goto err_msi_irq_uninit;
11574 ret = hclge_map_tqp(hdev);
11576 goto err_msi_irq_uninit;
11578 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11579 !hnae3_dev_phy_imp_supported(hdev)) {
11580 ret = hclge_mac_mdio_config(hdev);
11582 goto err_msi_irq_uninit;
11585 ret = hclge_init_umv_space(hdev);
11587 goto err_mdiobus_unreg;
11589 ret = hclge_mac_init(hdev);
11591 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11592 goto err_mdiobus_unreg;
11595 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11597 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11598 goto err_mdiobus_unreg;
11601 ret = hclge_config_gro(hdev);
11603 goto err_mdiobus_unreg;
11605 ret = hclge_init_vlan_config(hdev);
11607 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11608 goto err_mdiobus_unreg;
11611 ret = hclge_tm_schd_init(hdev);
11613 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11614 goto err_mdiobus_unreg;
11617 ret = hclge_rss_init_cfg(hdev);
11619 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11620 goto err_mdiobus_unreg;
11623 ret = hclge_rss_init_hw(hdev);
11625 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11626 goto err_mdiobus_unreg;
11629 ret = init_mgr_tbl(hdev);
11631 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11632 goto err_mdiobus_unreg;
11635 ret = hclge_init_fd_config(hdev);
11637 dev_err(&pdev->dev,
11638 "fd table init fail, ret=%d\n", ret);
11639 goto err_mdiobus_unreg;
11642 ret = hclge_ptp_init(hdev);
11644 goto err_mdiobus_unreg;
11646 INIT_KFIFO(hdev->mac_tnl_log);
11648 hclge_dcb_ops_set(hdev);
11650 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11651 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11653 /* Set up affinity after the service timer setup because add_timer_on
11654 * is called from the affinity notify callback.
11656 hclge_misc_affinity_setup(hdev);
11658 hclge_clear_all_event_cause(hdev);
11659 hclge_clear_resetting_state(hdev);
11661 /* Log and clear the hw errors that have already occurred */
11662 if (hnae3_dev_ras_imp_supported(hdev))
11663 hclge_handle_occurred_error(hdev);
11665 hclge_handle_all_hns_hw_errors(ae_dev);
11667 /* Request a delayed reset for error recovery, because an immediate
11668 * global reset on a PF would affect the pending initialization of other PFs.
11670 if (ae_dev->hw_err_reset_req) {
11671 enum hnae3_reset_type reset_level;
11673 reset_level = hclge_get_reset_level(ae_dev,
11674 &ae_dev->hw_err_reset_req);
11675 hclge_set_def_reset_request(ae_dev, reset_level);
11676 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11679 hclge_init_rxd_adv_layout(hdev);
11681 /* Enable MISC vector(vector0) */
11682 hclge_enable_vector(&hdev->misc_vector, true);
11684 hclge_state_init(hdev);
11685 hdev->last_reset_time = jiffies;
11687 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11688 HCLGE_DRIVER_NAME);
11690 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11695 if (hdev->hw.mac.phydev)
11696 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11697 err_msi_irq_uninit:
11698 hclge_misc_irq_uninit(hdev);
11700 pci_free_irq_vectors(pdev);
11702 hclge_cmd_uninit(hdev);
11703 err_devlink_uninit:
11704 hclge_devlink_uninit(hdev);
11706 pcim_iounmap(pdev, hdev->hw.io_base);
11707 pci_clear_master(pdev);
11708 pci_release_regions(pdev);
11709 pci_disable_device(pdev);
11711 mutex_destroy(&hdev->vport_lock);
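/* Rough initialization order above: PCI and devlink -> command queue and
 * firmware command interface -> capabilities and device specs -> configure
 * -> MSI and misc IRQ -> TQP/vport allocation and mapping -> MDIO/MAC ->
 * TSO/GRO/VLAN/TM/RSS/manager table/flow director -> PTP -> reset timer and
 * service task. The err_* labels unwind these steps in reverse order.
 */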
11715 static void hclge_stats_clear(struct hclge_dev *hdev)
11717 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11720 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11722 return hclge_config_switch_param(hdev, vf, enable,
11723 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11726 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11728 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11729 HCLGE_FILTER_FE_NIC_INGRESS_B,
11733 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11737 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11739 dev_err(&hdev->pdev->dev,
11740 "Set vf %d mac spoof check %s failed, ret=%d\n",
11741 vf, enable ? "on" : "off", ret);
11745 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11747 dev_err(&hdev->pdev->dev,
11748 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11749 vf, enable ? "on" : "off", ret);
11754 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11757 struct hclge_vport *vport = hclge_get_vport(handle);
11758 struct hclge_dev *hdev = vport->back;
11759 u32 new_spoofchk = enable ? 1 : 0;
11762 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11763 return -EOPNOTSUPP;
11765 vport = hclge_get_vf_vport(hdev, vf);
11769 if (vport->vf_info.spoofchk == new_spoofchk)
11772 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11773 dev_warn(&hdev->pdev->dev,
11774 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11776 else if (enable && hclge_is_umv_space_full(vport, true))
11777 dev_warn(&hdev->pdev->dev,
11778 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11781 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11785 vport->vf_info.spoofchk = new_spoofchk;
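/* Example usage (the PF netdev name and VF index are placeholders): VF spoof
 * checking is normally toggled from the PF, e.g.
 *   ip link set eth0 vf 1 spoofchk on
 * It needs a device version >= HNAE3_DEVICE_VERSION_V2, otherwise
 * -EOPNOTSUPP is returned.
 */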
11789 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11791 struct hclge_vport *vport = hdev->vport;
11795 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11798 /* resume the vf spoof check state after reset */
11799 for (i = 0; i < hdev->num_alloc_vport; i++) {
11800 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11801 vport->vf_info.spoofchk);
11811 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11813 struct hclge_vport *vport = hclge_get_vport(handle);
11814 struct hclge_dev *hdev = vport->back;
11815 u32 new_trusted = enable ? 1 : 0;
11817 vport = hclge_get_vf_vport(hdev, vf);
11821 if (vport->vf_info.trusted == new_trusted)
11824 vport->vf_info.trusted = new_trusted;
11825 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11826 hclge_task_schedule(hdev, 0);
11831 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11836 /* reset vf rate to default value */
11837 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11838 struct hclge_vport *vport = &hdev->vport[vf];
11840 vport->vf_info.max_tx_rate = 0;
11841 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11843 dev_err(&hdev->pdev->dev,
11844 "vf%d failed to reset to default, ret=%d\n",
11845 vf - HCLGE_VF_VPORT_START_NUM, ret);
11849 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11850 int min_tx_rate, int max_tx_rate)
11852 if (min_tx_rate != 0 ||
11853 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11854 dev_err(&hdev->pdev->dev,
11855 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11856 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11863 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11864 int min_tx_rate, int max_tx_rate, bool force)
11866 struct hclge_vport *vport = hclge_get_vport(handle);
11867 struct hclge_dev *hdev = vport->back;
11870 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11874 vport = hclge_get_vf_vport(hdev, vf);
11878 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11881 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11885 vport->vf_info.max_tx_rate = max_tx_rate;
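/* Example usage (the PF netdev name and VF index are placeholders): the VF
 * TX rate limit is set from the PF with iproute2, e.g.
 *   ip link set eth0 vf 1 max_tx_rate 1000
 * The rate is in Mbit/s; hclge_vf_rate_param_check() only accepts
 * min_tx_rate == 0 and a max_tx_rate within [0, hw.mac.max_speed], where 0
 * means "no limit".
 */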
11890 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11892 struct hnae3_handle *handle = &hdev->vport->nic;
11893 struct hclge_vport *vport;
11897 /* resume the vf max_tx_rate after reset */
11898 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11899 vport = hclge_get_vf_vport(hdev, vf);
11903 /* Zero means max rate; after reset the firmware has already set it
11904 * to max rate, so just continue.
11906 if (!vport->vf_info.max_tx_rate)
11909 ret = hclge_set_vf_rate(handle, vf, 0,
11910 vport->vf_info.max_tx_rate, true);
11912 dev_err(&hdev->pdev->dev,
11913 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11914 vf, vport->vf_info.max_tx_rate, ret);
11922 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11924 struct hclge_vport *vport = hdev->vport;
11927 for (i = 0; i < hdev->num_alloc_vport; i++) {
11928 hclge_vport_stop(vport);
11933 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11935 struct hclge_dev *hdev = ae_dev->priv;
11936 struct pci_dev *pdev = ae_dev->pdev;
11939 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11941 hclge_stats_clear(hdev);
11942 /* NOTE: a PF reset does not need to clear or restore the PF and VF table
11943 * entries, so do not clean the tables in memory here.
11945 if (hdev->reset_type == HNAE3_IMP_RESET ||
11946 hdev->reset_type == HNAE3_GLOBAL_RESET) {
11947 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11948 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11949 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11950 hclge_reset_umv_space(hdev);
11953 ret = hclge_cmd_init(hdev);
11955 dev_err(&pdev->dev, "Cmd queue init failed\n");
11959 ret = hclge_map_tqp(hdev);
11961 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11965 ret = hclge_mac_init(hdev);
11967 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11971 ret = hclge_tp_port_init(hdev);
11973 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11978 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11980 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11984 ret = hclge_config_gro(hdev);
11988 ret = hclge_init_vlan_config(hdev);
11990 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11994 ret = hclge_tm_init_hw(hdev, true);
11996 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12000 ret = hclge_rss_init_hw(hdev);
12002 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12006 ret = init_mgr_tbl(hdev);
12008 dev_err(&pdev->dev,
12009 "failed to reinit manager table, ret = %d\n", ret);
12013 ret = hclge_init_fd_config(hdev);
12015 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12019 ret = hclge_ptp_init(hdev);
12023 /* Log and clear the hw errors that have already occurred */
12024 if (hnae3_dev_ras_imp_supported(hdev))
12025 hclge_handle_occurred_error(hdev);
12027 hclge_handle_all_hns_hw_errors(ae_dev);
12029 /* Re-enable the hw error interrupts because
12030 * the interrupts get disabled on global reset.
12032 ret = hclge_config_nic_hw_error(hdev, true);
12034 dev_err(&pdev->dev,
12035 "fail(%d) to re-enable NIC hw error interrupts\n",
12040 if (hdev->roce_client) {
12041 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12043 dev_err(&pdev->dev,
12044 "fail(%d) to re-enable roce ras interrupts\n",
12050 hclge_reset_vport_state(hdev);
12051 ret = hclge_reset_vport_spoofchk(hdev);
12055 ret = hclge_resume_vf_rate(hdev);
12059 hclge_init_rxd_adv_layout(hdev);
12061 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12062 HCLGE_DRIVER_NAME);
12067 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12069 struct hclge_dev *hdev = ae_dev->priv;
12070 struct hclge_mac *mac = &hdev->hw.mac;
12072 hclge_reset_vf_rate(hdev);
12073 hclge_clear_vf_vlan(hdev);
12074 hclge_misc_affinity_teardown(hdev);
12075 hclge_state_uninit(hdev);
12076 hclge_ptp_uninit(hdev);
12077 hclge_uninit_rxd_adv_layout(hdev);
12078 hclge_uninit_mac_table(hdev);
12079 hclge_del_all_fd_entries(hdev);
12082 mdiobus_unregister(mac->mdio_bus);
12084 /* Disable MISC vector(vector0) */
12085 hclge_enable_vector(&hdev->misc_vector, false);
12086 synchronize_irq(hdev->misc_vector.vector_irq);
12088 /* Disable all hw interrupts */
12089 hclge_config_mac_tnl_int(hdev, false);
12090 hclge_config_nic_hw_error(hdev, false);
12091 hclge_config_rocee_ras_interrupt(hdev, false);
12093 hclge_cmd_uninit(hdev);
12094 hclge_misc_irq_uninit(hdev);
12095 hclge_devlink_uninit(hdev);
12096 hclge_pci_uninit(hdev);
12097 mutex_destroy(&hdev->vport_lock);
12098 hclge_uninit_vport_vlan_table(hdev);
12099 ae_dev->priv = NULL;
12102 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12104 struct hclge_vport *vport = hclge_get_vport(handle);
12105 struct hclge_dev *hdev = vport->back;
12107 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12110 static void hclge_get_channels(struct hnae3_handle *handle,
12111 struct ethtool_channels *ch)
12113 ch->max_combined = hclge_get_max_channels(handle);
12114 ch->other_count = 1;
12116 ch->combined_count = handle->kinfo.rss_size;
12119 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12120 u16 *alloc_tqps, u16 *max_rss_size)
12122 struct hclge_vport *vport = hclge_get_vport(handle);
12123 struct hclge_dev *hdev = vport->back;
12125 *alloc_tqps = vport->alloc_tqps;
12126 *max_rss_size = hdev->pf_rss_size_max;
12129 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12130 bool rxfh_configured)
12132 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12133 struct hclge_vport *vport = hclge_get_vport(handle);
12134 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12135 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12136 struct hclge_dev *hdev = vport->back;
12137 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12138 u16 cur_rss_size = kinfo->rss_size;
12139 u16 cur_tqps = kinfo->num_tqps;
12140 u16 tc_valid[HCLGE_MAX_TC_NUM];
12146 kinfo->req_rss_size = new_tqps_num;
12148 ret = hclge_tm_vport_map_update(hdev);
12150 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12154 roundup_size = roundup_pow_of_two(kinfo->rss_size);
12155 roundup_size = ilog2(roundup_size);
12156 /* Set the RSS TC mode according to the new RSS size */
12157 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12160 if (!(hdev->hw_tc_map & BIT(i)))
12164 tc_size[i] = roundup_size;
12165 tc_offset[i] = kinfo->rss_size * i;
12167 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
12171 /* RSS indirection table has been configured by user */
12172 if (rxfh_configured)
12175 /* Reinitialize the RSS indirection table according to the new RSS size */
12176 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12181 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12182 rss_indir[i] = i % kinfo->rss_size;
12184 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12186 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12193 dev_info(&hdev->pdev->dev,
12194 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12195 cur_rss_size, kinfo->rss_size,
12196 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
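/* Worked example for the resize above: with a new rss_size of 12,
 * roundup_pow_of_two(12) = 16 and ilog2(16) = 4, so every enabled TC gets
 * tc_size = 4 (i.e. 16 queue slots) and tc_offset = 12 * tc_index, and the
 * indirection table is refilled with rss_indir[i] = i % 12 unless the user
 * has configured it explicitly (rxfh_configured).
 */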
12201 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12202 u32 *regs_num_64_bit)
12204 struct hclge_desc desc;
12208 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12209 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12211 dev_err(&hdev->pdev->dev,
12212 "Query register number cmd failed, ret = %d.\n", ret);
12216 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12217 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12219 total_num = *regs_num_32_bit + *regs_num_64_bit;
12226 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12229 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12230 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12232 struct hclge_desc *desc;
12233 u32 *reg_val = data;
12243 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12244 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12245 HCLGE_32_BIT_REG_RTN_DATANUM);
12246 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12250 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12251 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12253 dev_err(&hdev->pdev->dev,
12254 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12259 for (i = 0; i < cmd_num; i++) {
12261 desc_data = (__le32 *)(&desc[i].data[0]);
12262 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12264 desc_data = (__le32 *)(&desc[i]);
12265 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12267 for (k = 0; k < n; k++) {
12268 *reg_val++ = le32_to_cpu(*desc_data++);
12280 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12283 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12284 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12286 struct hclge_desc *desc;
12287 u64 *reg_val = data;
12297 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12298 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12299 HCLGE_64_BIT_REG_RTN_DATANUM);
12300 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12304 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12305 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12307 dev_err(&hdev->pdev->dev,
12308 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12313 for (i = 0; i < cmd_num; i++) {
12315 desc_data = (__le64 *)(&desc[i].data[0]);
12316 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12318 desc_data = (__le64 *)(&desc[i]);
12319 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12321 for (k = 0; k < n; k++) {
12322 *reg_val++ = le64_to_cpu(*desc_data++);
12334 #define MAX_SEPARATE_NUM 4
12335 #define SEPARATOR_VALUE 0xFDFCFBFA
12336 #define REG_NUM_PER_LINE 4
12337 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12338 #define REG_SEPARATOR_LINE 1
12339 #define REG_NUM_REMAIN_MASK 3
12341 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12345 /* initialize all command BDs except the last one */
12346 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12347 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12349 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12352 /* initialize the last command BD */
12353 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12355 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12358 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12362 u32 entries_per_desc, desc_index, index, offset, i;
12363 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12366 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12368 dev_err(&hdev->pdev->dev,
12369 "Get dfx bd num fail, status is %d.\n", ret);
12373 entries_per_desc = ARRAY_SIZE(desc[0].data);
12374 for (i = 0; i < type_num; i++) {
12375 offset = hclge_dfx_bd_offset_list[i];
12376 index = offset % entries_per_desc;
12377 desc_index = offset / entries_per_desc;
12378 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12384 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12385 struct hclge_desc *desc_src, int bd_num,
12386 enum hclge_opcode_type cmd)
12388 struct hclge_desc *desc = desc_src;
12391 hclge_cmd_setup_basic_desc(desc, cmd, true);
12392 for (i = 0; i < bd_num - 1; i++) {
12393 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12395 hclge_cmd_setup_basic_desc(desc, cmd, true);
12399 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12401 dev_err(&hdev->pdev->dev,
12402 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12408 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12411 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12412 struct hclge_desc *desc = desc_src;
12415 entries_per_desc = ARRAY_SIZE(desc->data);
12416 reg_num = entries_per_desc * bd_num;
12417 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12418 for (i = 0; i < reg_num; i++) {
12419 index = i % entries_per_desc;
12420 desc_index = i / entries_per_desc;
12421 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12423 for (i = 0; i < separator_num; i++)
12424 *reg++ = SEPARATOR_VALUE;
12426 return reg_num + separator_num;
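/* Each register chunk in the dump is padded with SEPARATOR_VALUE words so
 * that it ends on a REG_NUM_PER_LINE (4-word) boundary: e.g. reg_num = 10
 * gives separator_num = 4 - (10 & 3) = 2, i.e. a 12-word chunk. When
 * reg_num is already a multiple of four, a full line of four separators is
 * appended.
 */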
12429 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12431 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12432 int data_len_per_desc, bd_num, i;
12437 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12441 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12443 dev_err(&hdev->pdev->dev,
12444 "Get dfx reg bd num fail, status is %d.\n", ret);
12448 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12450 for (i = 0; i < dfx_reg_type_num; i++) {
12451 bd_num = bd_num_list[i];
12452 data_len = data_len_per_desc * bd_num;
12453 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12457 kfree(bd_num_list);
12461 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12463 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12464 int bd_num, bd_num_max, buf_len, i;
12465 struct hclge_desc *desc_src;
12470 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12474 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12476 dev_err(&hdev->pdev->dev,
12477 "Get dfx reg bd num fail, status is %d.\n", ret);
12481 bd_num_max = bd_num_list[0];
12482 for (i = 1; i < dfx_reg_type_num; i++)
12483 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12485 buf_len = sizeof(*desc_src) * bd_num_max;
12486 desc_src = kzalloc(buf_len, GFP_KERNEL);
12492 for (i = 0; i < dfx_reg_type_num; i++) {
12493 bd_num = bd_num_list[i];
12494 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12495 hclge_dfx_reg_opcode_list[i]);
12497 dev_err(&hdev->pdev->dev,
12498 "Get dfx reg fail, status is %d.\n", ret);
12502 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12507 kfree(bd_num_list);
12511 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12512 struct hnae3_knic_private_info *kinfo)
12514 #define HCLGE_RING_REG_OFFSET 0x200
12515 #define HCLGE_RING_INT_REG_OFFSET 0x4
12517 int i, j, reg_num, separator_num;
12521 /* fetch per-PF register values from the PF PCIe register space */
12522 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12523 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12524 for (i = 0; i < reg_num; i++)
12525 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12526 for (i = 0; i < separator_num; i++)
12527 *reg++ = SEPARATOR_VALUE;
12528 data_num_sum = reg_num + separator_num;
12530 reg_num = ARRAY_SIZE(common_reg_addr_list);
12531 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12532 for (i = 0; i < reg_num; i++)
12533 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12534 for (i = 0; i < separator_num; i++)
12535 *reg++ = SEPARATOR_VALUE;
12536 data_num_sum += reg_num + separator_num;
12538 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12539 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12540 for (j = 0; j < kinfo->num_tqps; j++) {
12541 for (i = 0; i < reg_num; i++)
12542 *reg++ = hclge_read_dev(&hdev->hw,
12543 ring_reg_addr_list[i] +
12544 HCLGE_RING_REG_OFFSET * j);
12545 for (i = 0; i < separator_num; i++)
12546 *reg++ = SEPARATOR_VALUE;
12548 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12550 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12551 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12552 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12553 for (i = 0; i < reg_num; i++)
12554 *reg++ = hclge_read_dev(&hdev->hw,
12555 tqp_intr_reg_addr_list[i] +
12556 HCLGE_RING_INT_REG_OFFSET * j);
12557 for (i = 0; i < separator_num; i++)
12558 *reg++ = SEPARATOR_VALUE;
12560 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12562 return data_num_sum;
12565 static int hclge_get_regs_len(struct hnae3_handle *handle)
12567 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12568 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12569 struct hclge_vport *vport = hclge_get_vport(handle);
12570 struct hclge_dev *hdev = vport->back;
12571 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12572 int regs_lines_32_bit, regs_lines_64_bit;
12575 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12577 dev_err(&hdev->pdev->dev,
12578 "Get register number failed, ret = %d.\n", ret);
12582 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12584 dev_err(&hdev->pdev->dev,
12585 "Get dfx reg len failed, ret = %d.\n", ret);
12589 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12590 REG_SEPARATOR_LINE;
12591 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12592 REG_SEPARATOR_LINE;
12593 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12594 REG_SEPARATOR_LINE;
12595 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12596 REG_SEPARATOR_LINE;
12597 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12598 REG_SEPARATOR_LINE;
12599 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12600 REG_SEPARATOR_LINE;
12602 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12603 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12604 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12607 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12610 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12611 struct hclge_vport *vport = hclge_get_vport(handle);
12612 struct hclge_dev *hdev = vport->back;
12613 u32 regs_num_32_bit, regs_num_64_bit;
12614 int i, reg_num, separator_num, ret;
12617 *version = hdev->fw_version;
12619 ret = hclge_get_regs_num(hdev, ®s_num_32_bit, ®s_num_64_bit);
12621 dev_err(&hdev->pdev->dev,
12622 "Get register number failed, ret = %d.\n", ret);
12626 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12628 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12630 dev_err(&hdev->pdev->dev,
12631 "Get 32 bit register failed, ret = %d.\n", ret);
12634 reg_num = regs_num_32_bit;
12636 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12637 for (i = 0; i < separator_num; i++)
12638 *reg++ = SEPARATOR_VALUE;
12640 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12642 dev_err(&hdev->pdev->dev,
12643 "Get 64 bit register failed, ret = %d.\n", ret);
12646 reg_num = regs_num_64_bit * 2;
12648 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12649 for (i = 0; i < separator_num; i++)
12650 *reg++ = SEPARATOR_VALUE;
12652 ret = hclge_get_dfx_reg(hdev, reg);
12654 dev_err(&hdev->pdev->dev,
12655 "Get dfx register failed, ret = %d.\n", ret);
12658 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12660 struct hclge_set_led_state_cmd *req;
12661 struct hclge_desc desc;
12664 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12666 req = (struct hclge_set_led_state_cmd *)desc.data;
12667 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12668 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12670 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12672 dev_err(&hdev->pdev->dev,
12673 "Send set led state cmd error, ret =%d\n", ret);
12678 enum hclge_led_status {
12681 HCLGE_LED_NO_CHANGE = 0xFF,
12684 static int hclge_set_led_id(struct hnae3_handle *handle,
12685 enum ethtool_phys_id_state status)
12687 struct hclge_vport *vport = hclge_get_vport(handle);
12688 struct hclge_dev *hdev = vport->back;
12691 case ETHTOOL_ID_ACTIVE:
12692 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12693 case ETHTOOL_ID_INACTIVE:
12694 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12700 static void hclge_get_link_mode(struct hnae3_handle *handle,
12701 unsigned long *supported,
12702 unsigned long *advertising)
12704 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12705 struct hclge_vport *vport = hclge_get_vport(handle);
12706 struct hclge_dev *hdev = vport->back;
12707 unsigned int idx = 0;
12709 for (; idx < size; idx++) {
12710 supported[idx] = hdev->hw.mac.supported[idx];
12711 advertising[idx] = hdev->hw.mac.advertising[idx];
12715 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12717 struct hclge_vport *vport = hclge_get_vport(handle);
12718 struct hclge_dev *hdev = vport->back;
12719 bool gro_en_old = hdev->gro_en;
12722 hdev->gro_en = enable;
12723 ret = hclge_config_gro(hdev);
12725 hdev->gro_en = gro_en_old;
12730 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12732 struct hclge_vport *vport = &hdev->vport[0];
12733 struct hnae3_handle *handle = &vport->nic;
12738 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12739 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12740 vport->last_promisc_flags = vport->overflow_promisc_flags;
12743 if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12744 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12745 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12746 tmp_flags & HNAE3_MPE);
12748 clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12750 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12755 for (i = 1; i < hdev->num_alloc_vport; i++) {
12756 bool uc_en = false;
12757 bool mc_en = false;
12760 vport = &hdev->vport[i];
12762 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12766 if (vport->vf_info.trusted) {
12767 uc_en = vport->vf_info.request_uc_en > 0;
12768 mc_en = vport->vf_info.request_mc_en > 0;
12770 bc_en = vport->vf_info.request_bc_en > 0;
12772 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12775 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12779 hclge_set_vport_vlan_fltr_change(vport);
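/* Promiscuous state is synchronized lazily from the service task: for the
 * PF (vport 0) the applied mode is the union of the netdev flags and the
 * overflow-promisc flags, while for VFs the requested unicast/multicast
 * promisc bits are honoured only when the VF is trusted; the broadcast
 * request is always applied.
 */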
12783 static bool hclge_module_existed(struct hclge_dev *hdev)
12785 struct hclge_desc desc;
12789 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12790 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12792 dev_err(&hdev->pdev->dev,
12793 "failed to get SFP exist state, ret = %d\n", ret);
12797 existed = le32_to_cpu(desc.data[0]);
12799 return existed != 0;
12802 /* One read needs 6 BDs (140 bytes in total).
12803 * Return the number of bytes actually read; 0 means the read failed.
12805 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12808 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12809 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12815 /* setup all 6 bds to read module eeprom info. */
12816 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12817 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12820 /* bd0~bd4 need next flag */
12821 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12822 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
12825 /* setup bd0, this bd contains offset and read length. */
12826 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12827 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12828 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12829 sfp_info_bd0->read_len = cpu_to_le16(read_len);
12831 ret = hclge_cmd_send(&hdev->hw, desc, i);
12833 dev_err(&hdev->pdev->dev,
12834 "failed to get SFP eeprom info, ret = %d\n", ret);
12838 /* copy sfp info from bd0 to out buffer. */
12839 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12840 memcpy(data, sfp_info_bd0->data, copy_len);
12841 read_len = copy_len;
12843 /* copy sfp info from bd1~bd5 to out buffer if needed. */
12844 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12845 if (read_len >= len)
12848 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12849 memcpy(data + read_len, desc[i].data, copy_len);
12850 read_len += copy_len;
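/* One HCLGE_OPC_GET_SFP_EEPROM command chains six BDs: BD0 carries the
 * offset/length plus HCLGE_SFP_INFO_BD0_LEN bytes of data, and BD1..BD5
 * carry HCLGE_SFP_INFO_BDX_LEN bytes each, which presumably is where the
 * "140 bytes" per read mentioned above comes from. hclge_get_module_eeprom()
 * below just loops these reads until the requested length is satisfied.
 */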
12856 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12859 struct hclge_vport *vport = hclge_get_vport(handle);
12860 struct hclge_dev *hdev = vport->back;
12864 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12865 return -EOPNOTSUPP;
12867 if (!hclge_module_existed(hdev))
12870 while (read_len < len) {
12871 data_len = hclge_get_sfp_eeprom_info(hdev,
12878 read_len += data_len;
12884 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12887 struct hclge_vport *vport = hclge_get_vport(handle);
12888 struct hclge_dev *hdev = vport->back;
12889 struct hclge_desc desc;
12892 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12893 return -EOPNOTSUPP;
12895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12896 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12898 dev_err(&hdev->pdev->dev,
12899 "failed to query link diagnosis info, ret = %d\n", ret);
12903 *status_code = le32_to_cpu(desc.data[0]);
12907 static const struct hnae3_ae_ops hclge_ops = {
12908 .init_ae_dev = hclge_init_ae_dev,
12909 .uninit_ae_dev = hclge_uninit_ae_dev,
12910 .reset_prepare = hclge_reset_prepare_general,
12911 .reset_done = hclge_reset_done,
12912 .init_client_instance = hclge_init_client_instance,
12913 .uninit_client_instance = hclge_uninit_client_instance,
12914 .map_ring_to_vector = hclge_map_ring_to_vector,
12915 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12916 .get_vector = hclge_get_vector,
12917 .put_vector = hclge_put_vector,
12918 .set_promisc_mode = hclge_set_promisc_mode,
12919 .request_update_promisc_mode = hclge_request_update_promisc_mode,
12920 .set_loopback = hclge_set_loopback,
12921 .start = hclge_ae_start,
12922 .stop = hclge_ae_stop,
12923 .client_start = hclge_client_start,
12924 .client_stop = hclge_client_stop,
12925 .get_status = hclge_get_status,
12926 .get_ksettings_an_result = hclge_get_ksettings_an_result,
12927 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12928 .get_media_type = hclge_get_media_type,
12929 .check_port_speed = hclge_check_port_speed,
12930 .get_fec = hclge_get_fec,
12931 .set_fec = hclge_set_fec,
12932 .get_rss_key_size = hclge_get_rss_key_size,
12933 .get_rss = hclge_get_rss,
12934 .set_rss = hclge_set_rss,
12935 .set_rss_tuple = hclge_set_rss_tuple,
12936 .get_rss_tuple = hclge_get_rss_tuple,
12937 .get_tc_size = hclge_get_tc_size,
12938 .get_mac_addr = hclge_get_mac_addr,
12939 .set_mac_addr = hclge_set_mac_addr,
12940 .do_ioctl = hclge_do_ioctl,
12941 .add_uc_addr = hclge_add_uc_addr,
12942 .rm_uc_addr = hclge_rm_uc_addr,
12943 .add_mc_addr = hclge_add_mc_addr,
12944 .rm_mc_addr = hclge_rm_mc_addr,
12945 .set_autoneg = hclge_set_autoneg,
12946 .get_autoneg = hclge_get_autoneg,
12947 .restart_autoneg = hclge_restart_autoneg,
12948 .halt_autoneg = hclge_halt_autoneg,
12949 .get_pauseparam = hclge_get_pauseparam,
12950 .set_pauseparam = hclge_set_pauseparam,
12951 .set_mtu = hclge_set_mtu,
12952 .reset_queue = hclge_reset_tqp,
12953 .get_stats = hclge_get_stats,
12954 .get_mac_stats = hclge_get_mac_stat,
12955 .update_stats = hclge_update_stats,
12956 .get_strings = hclge_get_strings,
12957 .get_sset_count = hclge_get_sset_count,
12958 .get_fw_version = hclge_get_fw_version,
12959 .get_mdix_mode = hclge_get_mdix_mode,
12960 .enable_vlan_filter = hclge_enable_vlan_filter,
12961 .set_vlan_filter = hclge_set_vlan_filter,
12962 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12963 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12964 .reset_event = hclge_reset_event,
12965 .get_reset_level = hclge_get_reset_level,
12966 .set_default_reset_request = hclge_set_def_reset_request,
12967 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12968 .set_channels = hclge_set_channels,
12969 .get_channels = hclge_get_channels,
12970 .get_regs_len = hclge_get_regs_len,
12971 .get_regs = hclge_get_regs,
12972 .set_led_id = hclge_set_led_id,
12973 .get_link_mode = hclge_get_link_mode,
12974 .add_fd_entry = hclge_add_fd_entry,
12975 .del_fd_entry = hclge_del_fd_entry,
12976 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12977 .get_fd_rule_info = hclge_get_fd_rule_info,
12978 .get_fd_all_rules = hclge_get_all_rules,
12979 .enable_fd = hclge_enable_fd,
12980 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12981 .dbg_read_cmd = hclge_dbg_read_cmd,
12982 .handle_hw_ras_error = hclge_handle_hw_ras_error,
12983 .get_hw_reset_stat = hclge_get_hw_reset_stat,
12984 .ae_dev_resetting = hclge_ae_dev_resetting,
12985 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12986 .set_gro_en = hclge_gro_en,
12987 .get_global_queue_id = hclge_covert_handle_qid_global,
12988 .set_timer_task = hclge_set_timer_task,
12989 .mac_connect_phy = hclge_mac_connect_phy,
12990 .mac_disconnect_phy = hclge_mac_disconnect_phy,
12991 .get_vf_config = hclge_get_vf_config,
12992 .set_vf_link_state = hclge_set_vf_link_state,
12993 .set_vf_spoofchk = hclge_set_vf_spoofchk,
12994 .set_vf_trust = hclge_set_vf_trust,
12995 .set_vf_rate = hclge_set_vf_rate,
12996 .set_vf_mac = hclge_set_vf_mac,
12997 .get_module_eeprom = hclge_get_module_eeprom,
12998 .get_cmdq_stat = hclge_get_cmdq_stat,
12999 .add_cls_flower = hclge_add_cls_flower,
13000 .del_cls_flower = hclge_del_cls_flower,
13001 .cls_flower_active = hclge_is_cls_flower_active,
13002 .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
13003 .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
13004 .set_tx_hwts_info = hclge_ptp_set_tx_info,
13005 .get_rx_hwts = hclge_ptp_get_rx_hwts,
13006 .get_ts_info = hclge_ptp_get_ts_info,
13007 .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
13010 static struct hnae3_ae_algo ae_algo = {
13012 .pdev_id_table = ae_algo_pci_tbl,
13015 static int hclge_init(void)
13017 pr_info("%s is initializing\n", HCLGE_NAME);
13019 hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
13021 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
13025 hnae3_register_ae_algo(&ae_algo);
13030 static void hclge_exit(void)
13032 hnae3_unregister_ae_algo(&ae_algo);
13033 destroy_workqueue(hclge_wq);
13035 module_init(hclge_init);
13036 module_exit(hclge_exit);
13038 MODULE_LICENSE("GPL");
13039 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
13040 MODULE_DESCRIPTION("HCLGE Driver");
13041 MODULE_VERSION(HCLGE_MOD_VERSION);