1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
23 #include "hclge_err.h"
26 #define HCLGE_NAME "hclge"
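/* HCLGE_STATS_READ() returns the u64 counter stored at a byte offset inside a
 * stats structure, and HCLGE_MAC_STATS_FIELD_OFF() turns a field name of
 * struct hclge_mac_stats into such an offset; the two are used together when
 * walking the g_mac_stats_string[] table below.
 */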
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
30 #define HCLGE_BUF_SIZE_UNIT 256
32 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
33 static int hclge_init_vlan_config(struct hclge_dev *hdev);
34 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
35 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
36 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
37 u16 *allocated_size, bool is_alloc);
39 static struct hnae3_ae_algo ae_algo;
41 static const struct pci_device_id ae_algo_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 /* required last entry */
53 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
55 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
56 HCLGE_CMDQ_TX_ADDR_H_REG,
57 HCLGE_CMDQ_TX_DEPTH_REG,
58 HCLGE_CMDQ_TX_TAIL_REG,
59 HCLGE_CMDQ_TX_HEAD_REG,
60 HCLGE_CMDQ_RX_ADDR_L_REG,
61 HCLGE_CMDQ_RX_ADDR_H_REG,
62 HCLGE_CMDQ_RX_DEPTH_REG,
63 HCLGE_CMDQ_RX_TAIL_REG,
64 HCLGE_CMDQ_RX_HEAD_REG,
65 HCLGE_VECTOR0_CMDQ_SRC_REG,
66 HCLGE_CMDQ_INTR_STS_REG,
67 HCLGE_CMDQ_INTR_EN_REG,
68 HCLGE_CMDQ_INTR_GEN_REG};
70 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
71 HCLGE_VECTOR0_OTER_EN_REG,
72 HCLGE_MISC_RESET_STS_REG,
73 HCLGE_MISC_VECTOR_INT_STS,
74 HCLGE_GLOBAL_RESET_REG,
78 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
79 HCLGE_RING_RX_ADDR_H_REG,
80 HCLGE_RING_RX_BD_NUM_REG,
81 HCLGE_RING_RX_BD_LENGTH_REG,
82 HCLGE_RING_RX_MERGE_EN_REG,
83 HCLGE_RING_RX_TAIL_REG,
84 HCLGE_RING_RX_HEAD_REG,
85 HCLGE_RING_RX_FBD_NUM_REG,
86 HCLGE_RING_RX_OFFSET_REG,
87 HCLGE_RING_RX_FBD_OFFSET_REG,
88 HCLGE_RING_RX_STASH_REG,
89 HCLGE_RING_RX_BD_ERR_REG,
90 HCLGE_RING_TX_ADDR_L_REG,
91 HCLGE_RING_TX_ADDR_H_REG,
92 HCLGE_RING_TX_BD_NUM_REG,
93 HCLGE_RING_TX_PRIORITY_REG,
95 HCLGE_RING_TX_MERGE_EN_REG,
96 HCLGE_RING_TX_TAIL_REG,
97 HCLGE_RING_TX_HEAD_REG,
98 HCLGE_RING_TX_FBD_NUM_REG,
99 HCLGE_RING_TX_OFFSET_REG,
100 HCLGE_RING_TX_EBD_NUM_REG,
101 HCLGE_RING_TX_EBD_OFFSET_REG,
102 HCLGE_RING_TX_BD_ERR_REG,
105 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
106 HCLGE_TQP_INTR_GL0_REG,
107 HCLGE_TQP_INTR_GL1_REG,
108 HCLGE_TQP_INTR_GL2_REG,
109 HCLGE_TQP_INTR_RL_REG};
111 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
113 "Serdes serial Loopback test",
114 "Serdes parallel Loopback test",
118 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
119 {"mac_tx_mac_pause_num",
120 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
121 {"mac_rx_mac_pause_num",
122 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
123 {"mac_tx_control_pkt_num",
124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
125 {"mac_rx_control_pkt_num",
126 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
127 {"mac_tx_pfc_pkt_num",
128 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
129 {"mac_tx_pfc_pri0_pkt_num",
130 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
131 {"mac_tx_pfc_pri1_pkt_num",
132 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
133 {"mac_tx_pfc_pri2_pkt_num",
134 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
135 {"mac_tx_pfc_pri3_pkt_num",
136 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
137 {"mac_tx_pfc_pri4_pkt_num",
138 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
139 {"mac_tx_pfc_pri5_pkt_num",
140 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
141 {"mac_tx_pfc_pri6_pkt_num",
142 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
143 {"mac_tx_pfc_pri7_pkt_num",
144 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
145 {"mac_rx_pfc_pkt_num",
146 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
147 {"mac_rx_pfc_pri0_pkt_num",
148 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
149 {"mac_rx_pfc_pri1_pkt_num",
150 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
151 {"mac_rx_pfc_pri2_pkt_num",
152 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
153 {"mac_rx_pfc_pri3_pkt_num",
154 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
155 {"mac_rx_pfc_pri4_pkt_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
157 {"mac_rx_pfc_pri5_pkt_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
159 {"mac_rx_pfc_pri6_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
161 {"mac_rx_pfc_pri7_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
163 {"mac_tx_total_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
165 {"mac_tx_total_oct_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
167 {"mac_tx_good_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
169 {"mac_tx_bad_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
171 {"mac_tx_good_oct_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
173 {"mac_tx_bad_oct_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
175 {"mac_tx_uni_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
177 {"mac_tx_multi_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
179 {"mac_tx_broad_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
181 {"mac_tx_undersize_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
183 {"mac_tx_oversize_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
185 {"mac_tx_64_oct_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
187 {"mac_tx_65_127_oct_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
189 {"mac_tx_128_255_oct_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
191 {"mac_tx_256_511_oct_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
193 {"mac_tx_512_1023_oct_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
195 {"mac_tx_1024_1518_oct_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
197 {"mac_tx_1519_2047_oct_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
199 {"mac_tx_2048_4095_oct_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
201 {"mac_tx_4096_8191_oct_pkt_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
203 {"mac_tx_8192_9216_oct_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
205 {"mac_tx_9217_12287_oct_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
207 {"mac_tx_12288_16383_oct_pkt_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
209 {"mac_tx_1519_max_good_pkt_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
211 {"mac_tx_1519_max_bad_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
213 {"mac_rx_total_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
215 {"mac_rx_total_oct_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
217 {"mac_rx_good_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
219 {"mac_rx_bad_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
221 {"mac_rx_good_oct_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
223 {"mac_rx_bad_oct_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
225 {"mac_rx_uni_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
227 {"mac_rx_multi_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
229 {"mac_rx_broad_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
231 {"mac_rx_undersize_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
233 {"mac_rx_oversize_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
235 {"mac_rx_64_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
237 {"mac_rx_65_127_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
239 {"mac_rx_128_255_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
241 {"mac_rx_256_511_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
243 {"mac_rx_512_1023_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
245 {"mac_rx_1024_1518_oct_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
247 {"mac_rx_1519_2047_oct_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
249 {"mac_rx_2048_4095_oct_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
251 {"mac_rx_4096_8191_oct_pkt_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
253 {"mac_rx_8192_9216_oct_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
255 {"mac_rx_9217_12287_oct_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
257 {"mac_rx_12288_16383_oct_pkt_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
259 {"mac_rx_1519_max_good_pkt_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
261 {"mac_rx_1519_max_bad_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
264 {"mac_tx_fragment_pkt_num",
265 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
266 {"mac_tx_undermin_pkt_num",
267 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
268 {"mac_tx_jabber_pkt_num",
269 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
270 {"mac_tx_err_all_pkt_num",
271 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
272 {"mac_tx_from_app_good_pkt_num",
273 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
274 {"mac_tx_from_app_bad_pkt_num",
275 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
276 {"mac_rx_fragment_pkt_num",
277 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
278 {"mac_rx_undermin_pkt_num",
279 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
280 {"mac_rx_jabber_pkt_num",
281 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
282 {"mac_rx_fcs_err_pkt_num",
283 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
284 {"mac_rx_send_app_good_pkt_num",
285 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
286 {"mac_rx_send_app_bad_pkt_num",
287 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
290 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
292 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
293 .ethter_type = cpu_to_le16(HCLGE_MAC_ETHERTYPE_LLDP),
294 .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
295 .mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
296 .i_port_bitmap = 0x1,
300 static const u8 hclge_hash_key[] = {
301 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
302 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
303 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
304 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
305 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
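/* Read the MAC statistics with the fixed-length HCLGE_OPC_STATS_MAC command
 * (HCLGE_MAC_CMD_NUM descriptors) and accumulate them into
 * hdev->hw_stats.mac_stats.  This path is used when the firmware does not
 * support the newer "query register number first" method below.
 */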
308 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
310 #define HCLGE_MAC_CMD_NUM 21
312 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
313 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
318 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
319 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
321 dev_err(&hdev->pdev->dev,
322 "Get MAC pkt stats fail, status = %d.\n", ret);
327 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
328 /* for special opcode 0032, only the first desc has the head */
329 if (unlikely(i == 0)) {
330 desc_data = (__le64 *)(&desc[i].data[0]);
331 n = HCLGE_RD_FIRST_STATS_NUM;
333 desc_data = (__le64 *)(&desc[i]);
334 n = HCLGE_RD_OTHER_STATS_NUM;
337 for (k = 0; k < n; k++) {
338 *data += le64_to_cpu(*desc_data);
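/* Same accumulation as above, but using the HCLGE_OPC_STATS_MAC_ALL command
 * with a descriptor count reported by the firmware, see
 * hclge_mac_query_reg_num().
 */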
347 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
349 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
350 struct hclge_desc *desc;
355 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL);
358 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
359 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
365 for (i = 0; i < desc_num; i++) {
366 /* for special opcode 0034, only the first desc has the head */
368 desc_data = (__le64 *)(&desc[i].data[0]);
369 n = HCLGE_RD_FIRST_STATS_NUM;
371 desc_data = (__le64 *)(&desc[i]);
372 n = HCLGE_RD_OTHER_STATS_NUM;
375 for (k = 0; k < n; k++) {
376 *data += le64_to_cpu(*desc_data);
387 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
389 struct hclge_desc desc;
394 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
395 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
399 desc_data = (__le32 *)(&desc.data[0]);
400 reg_num = le32_to_cpu(*desc_data);
402 *desc_num = 1 + ((reg_num - 3) >> 2) +
403 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
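/* i.e. desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4), suggesting the first
 * descriptor covers three of the reported registers and each following
 * descriptor covers four.
 */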
408 static int hclge_mac_update_stats(struct hclge_dev *hdev)
413 ret = hclge_mac_query_reg_num(hdev, &desc_num);
415 /* The firmware supports the new statistics acquisition method */
417 ret = hclge_mac_update_stats_complete(hdev, desc_num);
418 else if (ret == -EOPNOTSUPP)
419 ret = hclge_mac_update_stats_defective(hdev);
421 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
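/* Walk every TQP of this handle and accumulate the per-queue RX/TX packet
 * counters (HCLGE_OPC_QUERY_RX_STATUS / HCLGE_OPC_QUERY_TX_STATUS) into
 * tqp->tqp_stats.
 */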
426 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
428 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
429 struct hclge_vport *vport = hclge_get_vport(handle);
430 struct hclge_dev *hdev = vport->back;
431 struct hnae3_queue *queue;
432 struct hclge_desc desc[1];
433 struct hclge_tqp *tqp;
436 for (i = 0; i < kinfo->num_tqps; i++) {
437 queue = handle->kinfo.tqp[i];
438 tqp = container_of(queue, struct hclge_tqp, q);
439 /* command : HCLGE_OPC_QUERY_RX_STATUS */
440 hclge_cmd_setup_basic_desc(&desc[0],
441 HCLGE_OPC_QUERY_RX_STATUS,
444 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
445 ret = hclge_cmd_send(&hdev->hw, desc, 1);
447 dev_err(&hdev->pdev->dev,
448 "Query tqp stat fail, status = %d,queue = %d\n",
452 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
453 le32_to_cpu(desc[0].data[1]);
456 for (i = 0; i < kinfo->num_tqps; i++) {
457 queue = handle->kinfo.tqp[i];
458 tqp = container_of(queue, struct hclge_tqp, q);
459 /* command : HCLGE_OPC_QUERY_TX_STATUS */
460 hclge_cmd_setup_basic_desc(&desc[0],
461 HCLGE_OPC_QUERY_TX_STATUS,
464 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
465 ret = hclge_cmd_send(&hdev->hw, desc, 1);
467 dev_err(&hdev->pdev->dev,
468 "Query tqp stat fail, status = %d,queue = %d\n",
472 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
473 le32_to_cpu(desc[0].data[1]);
479 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
481 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
482 struct hclge_tqp *tqp;
486 for (i = 0; i < kinfo->num_tqps; i++) {
487 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
488 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
491 for (i = 0; i < kinfo->num_tqps; i++) {
492 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
493 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
499 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
501 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
503 return kinfo->num_tqps * (2);
506 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
508 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
512 for (i = 0; i < kinfo->num_tqps; i++) {
513 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
514 struct hclge_tqp, q);
515 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
517 buff = buff + ETH_GSTRING_LEN;
520 for (i = 0; i < kinfo->num_tqps; i++) {
521 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
522 struct hclge_tqp, q);
523 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
525 buff = buff + ETH_GSTRING_LEN;
531 static u64 *hclge_comm_get_stats(void *comm_stats,
532 const struct hclge_comm_stats_str strs[],
538 for (i = 0; i < size; i++)
539 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
544 static u8 *hclge_comm_get_strings(u32 stringset,
545 const struct hclge_comm_stats_str strs[],
548 char *buff = (char *)data;
551 if (stringset != ETH_SS_STATS)
554 for (i = 0; i < size; i++) {
555 snprintf(buff, ETH_GSTRING_LEN,
557 buff = buff + ETH_GSTRING_LEN;
563 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
565 struct hnae3_handle *handle;
568 handle = &hdev->vport[0].nic;
569 if (handle->client) {
570 status = hclge_tqps_update_stats(handle);
572 dev_err(&hdev->pdev->dev,
573 "Update TQPS stats fail, status = %d.\n",
578 status = hclge_mac_update_stats(hdev);
580 dev_err(&hdev->pdev->dev,
581 "Update MAC stats fail, status = %d.\n", status);
584 static void hclge_update_stats(struct hnae3_handle *handle,
585 struct net_device_stats *net_stats)
587 struct hclge_vport *vport = hclge_get_vport(handle);
588 struct hclge_dev *hdev = vport->back;
591 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
594 status = hclge_mac_update_stats(hdev);
596 dev_err(&hdev->pdev->dev,
597 "Update MAC stats fail, status = %d.\n",
600 status = hclge_tqps_update_stats(handle);
602 dev_err(&hdev->pdev->dev,
603 "Update TQPS stats fail, status = %d.\n",
606 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
609 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
611 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
612 HNAE3_SUPPORT_PHY_LOOPBACK |\
613 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
614 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
616 struct hclge_vport *vport = hclge_get_vport(handle);
617 struct hclge_dev *hdev = vport->back;
620 /* Loopback test support rules:
621 * mac: only GE mode supports this test
622 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
623 * phy: only supported when a PHY device exists on the board
625 if (stringset == ETH_SS_TEST) {
626 /* clear the loopback bit flags first */
627 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
628 if (hdev->pdev->revision >= 0x21 ||
629 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
630 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
631 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
633 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
637 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
638 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
639 } else if (stringset == ETH_SS_STATS) {
640 count = ARRAY_SIZE(g_mac_stats_string) +
641 hclge_tqps_get_sset_count(handle, stringset);
647 static void hclge_get_strings(struct hnae3_handle *handle,
651 u8 *p = (char *)data;
654 if (stringset == ETH_SS_STATS) {
655 size = ARRAY_SIZE(g_mac_stats_string);
656 p = hclge_comm_get_strings(stringset,
660 p = hclge_tqps_get_strings(handle, p);
661 } else if (stringset == ETH_SS_TEST) {
662 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
664 hns3_nic_test_strs[HNAE3_LOOP_APP],
666 p += ETH_GSTRING_LEN;
668 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
670 hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
672 p += ETH_GSTRING_LEN;
674 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
676 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
678 p += ETH_GSTRING_LEN;
680 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
682 hns3_nic_test_strs[HNAE3_LOOP_PHY],
684 p += ETH_GSTRING_LEN;
689 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
691 struct hclge_vport *vport = hclge_get_vport(handle);
692 struct hclge_dev *hdev = vport->back;
695 p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats,
697 ARRAY_SIZE(g_mac_stats_string),
699 p = hclge_tqps_get_stats(handle, p);
702 static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
705 struct hclge_vport *vport = hclge_get_vport(handle);
706 struct hclge_dev *hdev = vport->back;
708 *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
709 *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
712 static int hclge_parse_func_status(struct hclge_dev *hdev,
713 struct hclge_func_status_cmd *status)
715 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
718 /* Set the PF as the main PF */
719 if (status->pf_state & HCLGE_PF_STATE_MAIN)
720 hdev->flag |= HCLGE_FLAG_MAIN;
722 hdev->flag &= ~HCLGE_FLAG_MAIN;
727 static int hclge_query_function_status(struct hclge_dev *hdev)
729 struct hclge_func_status_cmd *req;
730 struct hclge_desc desc;
734 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
735 req = (struct hclge_func_status_cmd *)desc.data;
738 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
740 dev_err(&hdev->pdev->dev,
741 "query function status failed %d.\n",
747 /* Check if PF reset is done */
750 usleep_range(1000, 2000);
751 } while (timeout++ < 5);
753 ret = hclge_parse_func_status(hdev, req);
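/* Query the PF resources granted by the firmware: TQP count, packet/TX/DV
 * buffer sizes and the MSI-X vector split between NIC and RoCE.
 */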
758 static int hclge_query_pf_resource(struct hclge_dev *hdev)
760 struct hclge_pf_res_cmd *req;
761 struct hclge_desc desc;
764 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
765 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
767 dev_err(&hdev->pdev->dev,
768 "query pf resource failed %d.\n", ret);
772 req = (struct hclge_pf_res_cmd *)desc.data;
773 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
774 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
776 if (req->tx_buf_size)
778 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
780 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
782 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
784 if (req->dv_buf_size)
786 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
788 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
790 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
792 if (hnae3_dev_roce_supported(hdev)) {
793 hdev->roce_base_msix_offset =
794 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
795 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
797 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
798 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
800 /* PF should have NIC vectors and RoCE vectors,
801 * NIC vectors are queued before RoCE vectors.
803 hdev->num_msi = hdev->num_roce_msi +
804 hdev->roce_base_msix_offset;
807 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
808 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
814 static int hclge_parse_speed(int speed_cmd, int *speed)
818 *speed = HCLGE_MAC_SPEED_10M;
821 *speed = HCLGE_MAC_SPEED_100M;
824 *speed = HCLGE_MAC_SPEED_1G;
827 *speed = HCLGE_MAC_SPEED_10G;
830 *speed = HCLGE_MAC_SPEED_25G;
833 *speed = HCLGE_MAC_SPEED_40G;
836 *speed = HCLGE_MAC_SPEED_50G;
839 *speed = HCLGE_MAC_SPEED_100G;
848 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
850 struct hclge_vport *vport = hclge_get_vport(handle);
851 struct hclge_dev *hdev = vport->back;
852 u32 speed_ability = hdev->hw.mac.speed_ability;
856 case HCLGE_MAC_SPEED_10M:
857 speed_bit = HCLGE_SUPPORT_10M_BIT;
859 case HCLGE_MAC_SPEED_100M:
860 speed_bit = HCLGE_SUPPORT_100M_BIT;
862 case HCLGE_MAC_SPEED_1G:
863 speed_bit = HCLGE_SUPPORT_1G_BIT;
865 case HCLGE_MAC_SPEED_10G:
866 speed_bit = HCLGE_SUPPORT_10G_BIT;
868 case HCLGE_MAC_SPEED_25G:
869 speed_bit = HCLGE_SUPPORT_25G_BIT;
871 case HCLGE_MAC_SPEED_40G:
872 speed_bit = HCLGE_SUPPORT_40G_BIT;
874 case HCLGE_MAC_SPEED_50G:
875 speed_bit = HCLGE_SUPPORT_50G_BIT;
877 case HCLGE_MAC_SPEED_100G:
878 speed_bit = HCLGE_SUPPORT_100G_BIT;
884 if (speed_bit & speed_ability)
890 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
892 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
893 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
895 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
896 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
898 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
899 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
901 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
902 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
904 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
905 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
909 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
911 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
912 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
914 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
915 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
917 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
918 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
920 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
921 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
923 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
924 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
928 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
930 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
931 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
933 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
934 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
936 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
937 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
939 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
940 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
942 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
943 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
947 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
949 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
950 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
952 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
953 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
955 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
956 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
958 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
959 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
961 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
962 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
964 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
965 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
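/* Derive the advertised FEC link modes and mac->fec_ability from the current
 * MAC speed (BaseR at 10G/40G, RS at 25G/50G/100G), after clearing any
 * previously set FEC bits.
 */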
969 static void hclge_convert_setting_fec(struct hclge_mac *mac)
971 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
972 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
974 switch (mac->speed) {
975 case HCLGE_MAC_SPEED_10G:
976 case HCLGE_MAC_SPEED_40G:
977 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
980 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
982 case HCLGE_MAC_SPEED_25G:
983 case HCLGE_MAC_SPEED_50G:
984 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
987 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
990 case HCLGE_MAC_SPEED_100G:
991 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
992 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
995 mac->fec_ability = 0;
1000 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1003 struct hclge_mac *mac = &hdev->hw.mac;
1005 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1006 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1009 hclge_convert_setting_sr(mac, speed_ability);
1010 hclge_convert_setting_lr(mac, speed_ability);
1011 hclge_convert_setting_cr(mac, speed_ability);
1012 if (hdev->pdev->revision >= 0x21)
1013 hclge_convert_setting_fec(mac);
1015 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1016 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1017 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1020 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1023 struct hclge_mac *mac = &hdev->hw.mac;
1025 hclge_convert_setting_kr(mac, speed_ability);
1026 if (hdev->pdev->revision >= 0x21)
1027 hclge_convert_setting_fec(mac);
1028 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1029 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1030 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1033 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1036 unsigned long *supported = hdev->hw.mac.supported;
1038 /* default to supporting all speeds for a GE port */
1040 speed_ability = HCLGE_SUPPORT_GE;
1042 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1043 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1046 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1047 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1049 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1053 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1054 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1059 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1060 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1063 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1065 u8 media_type = hdev->hw.mac.media_type;
1067 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1068 hclge_parse_fiber_link_mode(hdev, speed_ability);
1069 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1070 hclge_parse_copper_link_mode(hdev, speed_ability);
1071 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1072 hclge_parse_backplane_link_mode(hdev, speed_ability);
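/* Unpack the two-descriptor HCLGE_OPC_GET_CFG_PARAM response into struct
 * hclge_cfg: VMDq/TC/TQP-descriptor numbers, PHY address, media type, RX
 * buffer length, MAC address, default speed, RSS size, NUMA node map, speed
 * ability and UMV space.
 */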
1074 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1076 struct hclge_cfg_param_cmd *req;
1077 u64 mac_addr_tmp_high;
1081 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1083 /* get the configuration */
1084 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1087 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1088 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1089 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1090 HCLGE_CFG_TQP_DESC_N_M,
1091 HCLGE_CFG_TQP_DESC_N_S);
1093 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1094 HCLGE_CFG_PHY_ADDR_M,
1095 HCLGE_CFG_PHY_ADDR_S);
1096 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1097 HCLGE_CFG_MEDIA_TP_M,
1098 HCLGE_CFG_MEDIA_TP_S);
1099 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1100 HCLGE_CFG_RX_BUF_LEN_M,
1101 HCLGE_CFG_RX_BUF_LEN_S);
1102 /* get mac_address */
1103 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1104 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1105 HCLGE_CFG_MAC_ADDR_H_M,
1106 HCLGE_CFG_MAC_ADDR_H_S);
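/* param[3] carries the high part of the MAC address; the split shift below
 * ((x << 31) << 1, i.e. x << 32) places it above the 32 bits already read
 * from param[2].
 */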
1108 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1110 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1111 HCLGE_CFG_DEFAULT_SPEED_M,
1112 HCLGE_CFG_DEFAULT_SPEED_S);
1113 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1114 HCLGE_CFG_RSS_SIZE_M,
1115 HCLGE_CFG_RSS_SIZE_S);
1117 for (i = 0; i < ETH_ALEN; i++)
1118 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1120 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1121 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1123 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1124 HCLGE_CFG_SPEED_ABILITY_M,
1125 HCLGE_CFG_SPEED_ABILITY_S);
1126 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1127 HCLGE_CFG_UMV_TBL_SPACE_M,
1128 HCLGE_CFG_UMV_TBL_SPACE_S);
1129 if (!cfg->umv_space)
1130 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1133 /* hclge_get_cfg: query the static parameters from flash
1134 * @hdev: pointer to struct hclge_dev
1135 * @hcfg: the config structure to be filled in
1137 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1139 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1140 struct hclge_cfg_param_cmd *req;
1143 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1146 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1147 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1149 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1150 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1151 /* Length must be in units of 4 bytes when sent to the hardware */
1152 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1153 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1154 req->offset = cpu_to_le32(offset);
1157 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1159 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1163 hclge_parse_cfg(hcfg, desc);
1168 static int hclge_get_cap(struct hclge_dev *hdev)
1172 ret = hclge_query_function_status(hdev);
1174 dev_err(&hdev->pdev->dev,
1175 "query function status error %d.\n", ret);
1179 /* get pf resource */
1180 ret = hclge_query_pf_resource(hdev);
1182 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1187 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1189 #define HCLGE_MIN_TX_DESC 64
1190 #define HCLGE_MIN_RX_DESC 64
1192 if (!is_kdump_kernel())
1195 dev_info(&hdev->pdev->dev,
1196 "Running kdump kernel. Using minimal resources\n");
1198 /* the minimal number of queue pairs equals the number of vports */
1199 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1200 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1201 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1204 static int hclge_configure(struct hclge_dev *hdev)
1206 struct hclge_cfg cfg;
1209 ret = hclge_get_cfg(hdev, &cfg);
1211 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1215 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1216 hdev->base_tqp_pid = 0;
1217 hdev->rss_size_max = cfg.rss_size_max;
1218 hdev->rx_buf_len = cfg.rx_buf_len;
1219 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1220 hdev->hw.mac.media_type = cfg.media_type;
1221 hdev->hw.mac.phy_addr = cfg.phy_addr;
1222 hdev->num_tx_desc = cfg.tqp_desc_num;
1223 hdev->num_rx_desc = cfg.tqp_desc_num;
1224 hdev->tm_info.num_pg = 1;
1225 hdev->tc_max = cfg.tc_num;
1226 hdev->tm_info.hw_pfc_map = 0;
1227 hdev->wanted_umv_size = cfg.umv_space;
1229 if (hnae3_dev_fd_supported(hdev))
1232 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1234 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1238 hclge_parse_link_mode(hdev, cfg.speed_ability);
1240 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1241 (hdev->tc_max < 1)) {
1242 dev_warn(&hdev->pdev->dev, "TC num = %d.\n",
1247 /* Dev does not support DCB */
1248 if (!hnae3_dev_dcb_supported(hdev)) {
1252 hdev->pfc_max = hdev->tc_max;
1255 hdev->tm_info.num_tc = 1;
1257 /* Non-contiguous TCs are currently not supported */
1258 for (i = 0; i < hdev->tm_info.num_tc; i++)
1259 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1261 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1263 hclge_init_kdump_kernel_config(hdev);
1268 static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
1271 struct hclge_cfg_tso_status_cmd *req;
1272 struct hclge_desc desc;
1275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1277 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1280 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1281 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1282 req->tso_mss_min = cpu_to_le16(tso_mss);
1285 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1286 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1287 req->tso_mss_max = cpu_to_le16(tso_mss);
1289 return hclge_cmd_send(&hdev->hw, &desc, 1);
1292 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1294 struct hclge_cfg_gro_status_cmd *req;
1295 struct hclge_desc desc;
1298 if (!hnae3_dev_gro_supported(hdev))
1301 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1302 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1304 req->gro_en = cpu_to_le16(en ? 1 : 0);
1306 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1308 dev_err(&hdev->pdev->dev,
1309 "GRO hardware config cmd failed, ret = %d\n", ret);
1314 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1316 struct hclge_tqp *tqp;
1319 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1320 sizeof(struct hclge_tqp), GFP_KERNEL);
1326 for (i = 0; i < hdev->num_tqps; i++) {
1327 tqp->dev = &hdev->pdev->dev;
1330 tqp->q.ae_algo = &ae_algo;
1331 tqp->q.buf_size = hdev->rx_buf_len;
1332 tqp->q.tx_desc_num = hdev->num_tx_desc;
1333 tqp->q.rx_desc_num = hdev->num_rx_desc;
1334 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1335 i * HCLGE_TQP_REG_SIZE;
1343 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1344 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1346 struct hclge_tqp_map_cmd *req;
1347 struct hclge_desc desc;
1350 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1352 req = (struct hclge_tqp_map_cmd *)desc.data;
1353 req->tqp_id = cpu_to_le16(tqp_pid);
1354 req->tqp_vf = func_id;
1355 req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1356 1 << HCLGE_TQP_MAP_EN_B;
1357 req->tqp_vid = cpu_to_le16(tqp_vid);
1359 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1361 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1366 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1368 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1369 struct hclge_dev *hdev = vport->back;
1372 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1373 alloced < num_tqps; i++) {
1374 if (!hdev->htqp[i].alloced) {
1375 hdev->htqp[i].q.handle = &vport->nic;
1376 hdev->htqp[i].q.tqp_index = alloced;
1377 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1378 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1379 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1380 hdev->htqp[i].alloced = true;
1384 vport->alloc_tqps = alloced;
1385 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1386 vport->alloc_tqps / hdev->tm_info.num_tc);
1391 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1392 u16 num_tx_desc, u16 num_rx_desc)
1395 struct hnae3_handle *nic = &vport->nic;
1396 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1397 struct hclge_dev *hdev = vport->back;
1400 kinfo->num_tx_desc = num_tx_desc;
1401 kinfo->num_rx_desc = num_rx_desc;
1403 kinfo->rx_buf_len = hdev->rx_buf_len;
1405 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1406 sizeof(struct hnae3_queue *), GFP_KERNEL);
1410 ret = hclge_assign_tqp(vport, num_tqps);
1412 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1417 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1418 struct hclge_vport *vport)
1420 struct hnae3_handle *nic = &vport->nic;
1421 struct hnae3_knic_private_info *kinfo;
1424 kinfo = &nic->kinfo;
1425 for (i = 0; i < vport->alloc_tqps; i++) {
1426 struct hclge_tqp *q =
1427 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1431 is_pf = !(vport->vport_id);
1432 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1441 static int hclge_map_tqp(struct hclge_dev *hdev)
1443 struct hclge_vport *vport = hdev->vport;
1446 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1447 for (i = 0; i < num_vport; i++) {
1450 ret = hclge_map_tqp_to_vport(hdev, vport);
1460 static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps)
1462 /* this will be initialized later */
1465 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1467 struct hnae3_handle *nic = &vport->nic;
1468 struct hclge_dev *hdev = vport->back;
1471 nic->pdev = hdev->pdev;
1472 nic->ae_algo = &ae_algo;
1473 nic->numa_node_mask = hdev->numa_node_mask;
1475 if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) {
1476 ret = hclge_knic_setup(vport, num_tqps,
1477 hdev->num_tx_desc, hdev->num_rx_desc);
1480 dev_err(&hdev->pdev->dev, "knic setup failed %d\n",
1485 hclge_unic_setup(vport, num_tqps);
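/* Create one hclge_vport per function (VMDq vports + requested VFs + the PF
 * itself) and spread the TQPs evenly across them, the main (PF) vport also
 * taking the remainder.
 */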
1491 static int hclge_alloc_vport(struct hclge_dev *hdev)
1493 struct pci_dev *pdev = hdev->pdev;
1494 struct hclge_vport *vport;
1500 /* We need to alloc a vport for the main NIC of the PF */
1501 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1503 if (hdev->num_tqps < num_vport) {
1504 dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)",
1505 hdev->num_tqps, num_vport);
1509 /* Alloc the same number of TQPs for every vport */
1510 tqp_per_vport = hdev->num_tqps / num_vport;
1511 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
1513 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1518 hdev->vport = vport;
1519 hdev->num_alloc_vport = num_vport;
1521 if (IS_ENABLED(CONFIG_PCI_IOV))
1522 hdev->num_alloc_vfs = hdev->num_req_vfs;
1524 for (i = 0; i < num_vport; i++) {
1526 vport->vport_id = i;
1527 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1528 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1529 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1530 INIT_LIST_HEAD(&vport->vlan_list);
1531 INIT_LIST_HEAD(&vport->uc_mac_list);
1532 INIT_LIST_HEAD(&vport->mc_mac_list);
1535 ret = hclge_vport_setup(vport, tqp_main_vport);
1537 ret = hclge_vport_setup(vport, tqp_per_vport);
1540 "vport setup failed for vport %d, %d\n",
1551 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1552 struct hclge_pkt_buf_alloc *buf_alloc)
1554 /* TX buffer size is in units of 128 bytes */
1555 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1556 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1557 struct hclge_tx_buff_alloc_cmd *req;
1558 struct hclge_desc desc;
1562 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1565 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1566 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1568 req->tx_pkt_buff[i] =
1569 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1570 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1573 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1575 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1581 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1582 struct hclge_pkt_buf_alloc *buf_alloc)
1584 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1587 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1592 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1596 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1597 if (hdev->hw_tc_map & BIT(i))
1602 /* Get the number of PFC-enabled TCs, which have a private buffer */
1603 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1604 struct hclge_pkt_buf_alloc *buf_alloc)
1606 struct hclge_priv_buf *priv;
1609 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1610 priv = &buf_alloc->priv_buf[i];
1611 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1619 /* Get the number of PFC-disabled TCs, which have a private buffer */
1620 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1621 struct hclge_pkt_buf_alloc *buf_alloc)
1623 struct hclge_priv_buf *priv;
1626 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1627 priv = &buf_alloc->priv_buf[i];
1628 if (hdev->hw_tc_map & BIT(i) &&
1629 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1637 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1639 struct hclge_priv_buf *priv;
1643 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1644 priv = &buf_alloc->priv_buf[i];
1646 rx_priv += priv->buf_size;
1651 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1653 u32 i, total_tx_size = 0;
1655 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1656 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1658 return total_tx_size;
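/* Check whether rx_all (the packet buffer left after TX allocation) can hold
 * the already assigned private buffers plus a standard shared buffer; if so,
 * size the shared buffer and fill in its self and per-TC high/low thresholds.
 */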
1661 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1662 struct hclge_pkt_buf_alloc *buf_alloc,
1665 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1666 u32 tc_num = hclge_get_tc_num(hdev);
1667 u32 shared_buf, aligned_mps;
1671 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1673 if (hnae3_dev_dcb_supported(hdev))
1674 shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
1676 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1677 + hdev->dv_buf_size;
1679 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1680 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1681 HCLGE_BUF_SIZE_UNIT);
1683 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1684 if (rx_all < rx_priv + shared_std)
1687 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1688 buf_alloc->s_buf.buf_size = shared_buf;
1689 if (hnae3_dev_dcb_supported(hdev)) {
1690 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1691 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1692 - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
1694 buf_alloc->s_buf.self.high = aligned_mps +
1695 HCLGE_NON_DCB_ADDITIONAL_BUF;
1696 buf_alloc->s_buf.self.low = aligned_mps;
1699 if (hnae3_dev_dcb_supported(hdev)) {
1701 hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
1703 hi_thrd = shared_buf - hdev->dv_buf_size;
1705 hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
1706 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1707 lo_thrd = hi_thrd - aligned_mps / 2;
1709 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1710 lo_thrd = aligned_mps;
1713 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1714 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1715 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1721 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1722 struct hclge_pkt_buf_alloc *buf_alloc)
1726 total_size = hdev->pkt_buf_size;
1728 /* alloc tx buffer for all enabled tc */
1729 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1730 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1732 if (hdev->hw_tc_map & BIT(i)) {
1733 if (total_size < hdev->tx_buf_size)
1736 priv->tx_buf_size = hdev->tx_buf_size;
1738 priv->tx_buf_size = 0;
1741 total_size -= priv->tx_buf_size;
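/* Assign an RX private buffer and waterlines to every enabled TC, using
 * either the larger ("max") or the smaller profile, then verify the layout
 * still fits by calling hclge_is_rx_buf_ok().
 */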
1747 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1748 struct hclge_pkt_buf_alloc *buf_alloc)
1750 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1751 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1754 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1755 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1762 if (!(hdev->hw_tc_map & BIT(i)))
1767 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1768 priv->wl.low = max ? aligned_mps : 256;
1769 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1770 HCLGE_BUF_SIZE_UNIT);
1773 priv->wl.high = max ? (aligned_mps * 2) : aligned_mps;
1776 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1779 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1782 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1783 struct hclge_pkt_buf_alloc *buf_alloc)
1785 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1786 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1789 /* clear the highest-numbered TC first */
1790 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1791 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1793 if (hdev->hw_tc_map & BIT(i) &&
1794 !(hdev->tm_info.hw_pfc_map & BIT(i))) {
1795 /* Clear the private buffer of a TC without PFC */
1803 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1804 no_pfc_priv_num == 0)
1808 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1811 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1812 struct hclge_pkt_buf_alloc *buf_alloc)
1814 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1815 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1818 /* clear the highest-numbered TC first */
1819 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1820 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1822 if (hdev->hw_tc_map & BIT(i) &&
1823 hdev->tm_info.hw_pfc_map & BIT(i)) {
1824 /* Reduce the number of PFC TCs with a private buffer */
1832 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1837 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1840 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
1841 * @hdev: pointer to struct hclge_dev
1842 * @buf_alloc: pointer to buffer calculation data
1843 * @return: 0: calculation successful, negative: fail
1845 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
1846 struct hclge_pkt_buf_alloc *buf_alloc)
1848 /* When DCB is not supported, rx private buffer is not allocated. */
1849 if (!hnae3_dev_dcb_supported(hdev)) {
1850 u32 rx_all = hdev->pkt_buf_size;
1852 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
1853 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
1859 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
1862 /* try to decrease the buffer size */
1863 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
1866 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
1869 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
1875 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
1876 struct hclge_pkt_buf_alloc *buf_alloc)
1878 struct hclge_rx_priv_buff_cmd *req;
1879 struct hclge_desc desc;
1883 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
1884 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
1886 /* Alloc private buffer TCs */
1887 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1888 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1891 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
1893 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
1897 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1898 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1900 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1902 dev_err(&hdev->pdev->dev,
1903 "rx private buffer alloc cmd failed %d\n", ret);
1908 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
1909 struct hclge_pkt_buf_alloc *buf_alloc)
1911 struct hclge_rx_priv_wl_buf *req;
1912 struct hclge_priv_buf *priv;
1913 struct hclge_desc desc[2];
1917 for (i = 0; i < 2; i++) {
1918 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
1920 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
1922 /* The first descriptor sets the NEXT bit to 1 */
1924 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1926 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1928 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1929 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
1931 priv = &buf_alloc->priv_buf[idx];
1932 req->tc_wl[j].high =
1933 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
1934 req->tc_wl[j].high |=
1935 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1937 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
1938 req->tc_wl[j].low |=
1939 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1943 /* Send 2 descriptors at one time */
1944 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1946 dev_err(&hdev->pdev->dev,
1947 "rx private waterline config cmd failed %d\n",
1952 static int hclge_common_thrd_config(struct hclge_dev *hdev,
1953 struct hclge_pkt_buf_alloc *buf_alloc)
1955 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
1956 struct hclge_rx_com_thrd *req;
1957 struct hclge_desc desc[2];
1958 struct hclge_tc_thrd *tc;
1962 for (i = 0; i < 2; i++) {
1963 hclge_cmd_setup_basic_desc(&desc[i],
1964 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
1965 req = (struct hclge_rx_com_thrd *)&desc[i].data;
1967 /* The first descriptor sets the NEXT bit to 1 */
1969 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1971 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
1973 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
1974 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
1976 req->com_thrd[j].high =
1977 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
1978 req->com_thrd[j].high |=
1979 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1980 req->com_thrd[j].low =
1981 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
1982 req->com_thrd[j].low |=
1983 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
1987 /* Send 2 descriptors at one time */
1988 ret = hclge_cmd_send(&hdev->hw, desc, 2);
1990 dev_err(&hdev->pdev->dev,
1991 "common threshold config cmd failed %d\n", ret);
1995 static int hclge_common_wl_config(struct hclge_dev *hdev,
1996 struct hclge_pkt_buf_alloc *buf_alloc)
1998 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
1999 struct hclge_rx_com_wl *req;
2000 struct hclge_desc desc;
2003 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2005 req = (struct hclge_rx_com_wl *)desc.data;
2006 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2007 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2009 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2010 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2012 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2014 dev_err(&hdev->pdev->dev,
2015 "common waterline config cmd failed %d\n", ret);
2020 int hclge_buffer_alloc(struct hclge_dev *hdev)
2022 struct hclge_pkt_buf_alloc *pkt_buf;
2025 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2029 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2031 dev_err(&hdev->pdev->dev,
2032 "could not calc tx buffer size for all TCs %d\n", ret);
2036 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2038 dev_err(&hdev->pdev->dev,
2039 "could not alloc tx buffers %d\n", ret);
2043 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2045 dev_err(&hdev->pdev->dev,
2046 "could not calc rx priv buffer size for all TCs %d\n",
2051 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2053 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2058 if (hnae3_dev_dcb_supported(hdev)) {
2059 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2061 dev_err(&hdev->pdev->dev,
2062 "could not configure rx private waterline %d\n",
2067 ret = hclge_common_thrd_config(hdev, pkt_buf);
2069 dev_err(&hdev->pdev->dev,
2070 "could not configure common threshold %d\n",
2076 ret = hclge_common_wl_config(hdev, pkt_buf);
2078 dev_err(&hdev->pdev->dev,
2079 "could not configure common waterline %d\n", ret);
2086 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2088 struct hnae3_handle *roce = &vport->roce;
2089 struct hnae3_handle *nic = &vport->nic;
2091 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2093 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2094 vport->back->num_msi_left == 0)
2097 roce->rinfo.base_vector = vport->back->roce_base_vector;
2099 roce->rinfo.netdev = nic->kinfo.netdev;
2100 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2102 roce->pdev = nic->pdev;
2103 roce->ae_algo = nic->ae_algo;
2104 roce->numa_node_mask = nic->numa_node_mask;
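/* Allocate MSI or MSI-X vectors for the PF (up to hdev->num_msi) and set up
 * the vector bookkeeping arrays; the RoCE vectors start roce_base_msix_offset
 * entries after the NIC ones.
 */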
2109 static int hclge_init_msi(struct hclge_dev *hdev)
2111 struct pci_dev *pdev = hdev->pdev;
2115 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
2116 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2119 "failed(%d) to allocate MSI/MSI-X vectors\n",
2123 if (vectors < hdev->num_msi)
2124 dev_warn(&hdev->pdev->dev,
2125 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2126 hdev->num_msi, vectors);
2128 hdev->num_msi = vectors;
2129 hdev->num_msi_left = vectors;
2130 hdev->base_msi_vector = pdev->irq;
2131 hdev->roce_base_vector = hdev->base_msi_vector +
2132 hdev->roce_base_msix_offset;
2134 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2135 sizeof(u16), GFP_KERNEL);
2136 if (!hdev->vector_status) {
2137 pci_free_irq_vectors(pdev);
2141 for (i = 0; i < hdev->num_msi; i++)
2142 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2144 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2145 sizeof(int), GFP_KERNEL);
2146 if (!hdev->vector_irq) {
2147 pci_free_irq_vectors(pdev);
2154 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2157 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2158 duplex = HCLGE_MAC_FULL;
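/* Program speed/duplex into the firmware via HCLGE_OPC_CONFIG_SPEED_DUP; the
 * speed is encoded as a small selector (0 = 1G, 1 = 10G, 2 = 25G, 3 = 40G,
 * 4 = 50G, 5 = 100G, 6 = 10M, 7 = 100M), as the switch below shows.
 */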
2163 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2166 struct hclge_config_mac_speed_dup_cmd *req;
2167 struct hclge_desc desc;
2170 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2172 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2174 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex);
2177 case HCLGE_MAC_SPEED_10M:
2178 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2179 HCLGE_CFG_SPEED_S, 6);
2181 case HCLGE_MAC_SPEED_100M:
2182 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2183 HCLGE_CFG_SPEED_S, 7);
2185 case HCLGE_MAC_SPEED_1G:
2186 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2187 HCLGE_CFG_SPEED_S, 0);
2189 case HCLGE_MAC_SPEED_10G:
2190 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2191 HCLGE_CFG_SPEED_S, 1);
2193 case HCLGE_MAC_SPEED_25G:
2194 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2195 HCLGE_CFG_SPEED_S, 2);
2197 case HCLGE_MAC_SPEED_40G:
2198 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2199 HCLGE_CFG_SPEED_S, 3);
2201 case HCLGE_MAC_SPEED_50G:
2202 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2203 HCLGE_CFG_SPEED_S, 4);
2205 case HCLGE_MAC_SPEED_100G:
2206 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2207 HCLGE_CFG_SPEED_S, 5);
2210 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2214 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2217 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2219 dev_err(&hdev->pdev->dev,
2220 "mac speed/duplex config cmd failed %d.\n", ret);
2227 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2231 duplex = hclge_check_speed_dup(duplex, speed);
2232 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2235 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2239 hdev->hw.mac.speed = speed;
2240 hdev->hw.mac.duplex = duplex;
2245 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2248 struct hclge_vport *vport = hclge_get_vport(handle);
2249 struct hclge_dev *hdev = vport->back;
2251 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2254 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2256 struct hclge_config_auto_neg_cmd *req;
2257 struct hclge_desc desc;
2261 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2263 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2264 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
2265 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2267 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2269 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2275 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2277 struct hclge_vport *vport = hclge_get_vport(handle);
2278 struct hclge_dev *hdev = vport->back;
2280 if (!hdev->hw.mac.support_autoneg) {
2282 dev_err(&hdev->pdev->dev,
2283 "autoneg is not supported by current port\n");
2290 return hclge_set_autoneg_en(hdev, enable);
2293 static int hclge_get_autoneg(struct hnae3_handle *handle)
2295 struct hclge_vport *vport = hclge_get_vport(handle);
2296 struct hclge_dev *hdev = vport->back;
2297 struct phy_device *phydev = hdev->hw.mac.phydev;
2300 return phydev->autoneg;
2302 return hdev->hw.mac.autoneg;
2305 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2307 struct hclge_vport *vport = hclge_get_vport(handle);
2308 struct hclge_dev *hdev = vport->back;
2311 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2313 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2316 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
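/* hclge_set_fec_hw(): translate the HNAE3_FEC_* bits in @fec_mode into the
 * firmware FEC configuration and send the HCLGE_OPC_CONFIG_FEC_MODE command.
 */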
2319 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2321 struct hclge_config_fec_cmd *req;
2322 struct hclge_desc desc;
2325 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2327 req = (struct hclge_config_fec_cmd *)desc.data;
2328 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2329 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2330 if (fec_mode & BIT(HNAE3_FEC_RS))
2331 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2332 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2333 if (fec_mode & BIT(HNAE3_FEC_BASER))
2334 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2335 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2337 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2339 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2344 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2346 struct hclge_vport *vport = hclge_get_vport(handle);
2347 struct hclge_dev *hdev = vport->back;
2348 struct hclge_mac *mac = &hdev->hw.mac;
2351 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2352 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2356 ret = hclge_set_fec_hw(hdev, fec_mode);
2360 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2364 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2367 struct hclge_vport *vport = hclge_get_vport(handle);
2368 struct hclge_dev *hdev = vport->back;
2369 struct hclge_mac *mac = &hdev->hw.mac;
2372 *fec_ability = mac->fec_ability;
2374 *fec_mode = mac->fec_mode;
2377 static int hclge_mac_init(struct hclge_dev *hdev)
2379 struct hclge_mac *mac = &hdev->hw.mac;
2382 hdev->support_sfp_query = true;
2383 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2384 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2385 hdev->hw.mac.duplex);
2387 dev_err(&hdev->pdev->dev,
2388 "Config mac speed dup fail ret=%d\n", ret);
2394 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2395 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2397 dev_err(&hdev->pdev->dev,
2398 "Fec mode init fail, ret = %d\n", ret);
2403 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2405 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2409 ret = hclge_buffer_alloc(hdev);
2411 dev_err(&hdev->pdev->dev,
2412 "allocate buffer fail, ret=%d\n", ret);
2417 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2419 if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
2420 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2421 schedule_work(&hdev->mbx_service_task);
2424 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2426 if (!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2427 schedule_work(&hdev->rst_service_task);
2430 static void hclge_task_schedule(struct hclge_dev *hdev)
2432 if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) &&
2433 !test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2434 !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state))
2435 (void)schedule_work(&hdev->service_task);
2438 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2440 struct hclge_link_status_cmd *req;
2441 struct hclge_desc desc;
2445 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2446 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2448 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2453 req = (struct hclge_link_status_cmd *)desc.data;
2454 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2456 return !!link_status;
2459 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2464 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2467 mac_state = hclge_get_mac_link_status(hdev);
2469 if (hdev->hw.mac.phydev) {
2470 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2471 link_stat = mac_state &
2472 hdev->hw.mac.phydev->link;
2477 link_stat = mac_state;
2483 static void hclge_update_link_status(struct hclge_dev *hdev)
2485 struct hnae3_client *rclient = hdev->roce_client;
2486 struct hnae3_client *client = hdev->nic_client;
2487 struct hnae3_handle *rhandle;
2488 struct hnae3_handle *handle;
2494 state = hclge_get_mac_phy_link(hdev);
2495 if (state != hdev->hw.mac.link) {
2496 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2497 handle = &hdev->vport[i].nic;
2498 client->ops->link_status_change(handle, state);
2499 hclge_config_mac_tnl_int(hdev, state);
2500 rhandle = &hdev->vport[i].roce;
2501 if (rclient && rclient->ops->link_status_change)
2502 rclient->ops->link_status_change(rhandle,
2505 hdev->hw.mac.link = state;
2509 static void hclge_update_port_capability(struct hclge_mac *mac)
2511 /* firmware cannot identify the backplane type; the media type
2512 * read from the configuration helps to deal with it
2514 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2515 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2516 mac->module_type = HNAE3_MODULE_TYPE_KR;
2517 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2518 mac->module_type = HNAE3_MODULE_TYPE_TP;
2520 if (mac->support_autoneg) {
2521 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2522 linkmode_copy(mac->advertising, mac->supported);
2524 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2526 linkmode_zero(mac->advertising);
2530 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2532 struct hclge_sfp_info_cmd *resp = NULL;
2533 struct hclge_desc desc;
2536 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2537 resp = (struct hclge_sfp_info_cmd *)desc.data;
2538 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2539 if (ret == -EOPNOTSUPP) {
2540 dev_warn(&hdev->pdev->dev,
2541 "IMP do not support get SFP speed %d\n", ret);
2544 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2548 *speed = le32_to_cpu(resp->speed);
2553 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2555 struct hclge_sfp_info_cmd *resp;
2556 struct hclge_desc desc;
2559 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2560 resp = (struct hclge_sfp_info_cmd *)desc.data;
2562 resp->query_type = QUERY_ACTIVE_SPEED;
2564 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2565 if (ret == -EOPNOTSUPP) {
2566 dev_warn(&hdev->pdev->dev,
2567 "IMP does not support get SFP info %d\n", ret);
2570 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2574 mac->speed = le32_to_cpu(resp->speed);
2575 /* if resp->speed_ability is 0, it means the firmware is an old
2576 * version; do not update these parameters
2578 if (resp->speed_ability) {
2579 mac->module_type = le32_to_cpu(resp->module_type);
2580 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2581 mac->autoneg = resp->autoneg;
2582 mac->support_autoneg = resp->autoneg_ability;
2584 mac->speed_type = QUERY_SFP_SPEED;
2590 static int hclge_update_port_info(struct hclge_dev *hdev)
2592 struct hclge_mac *mac = &hdev->hw.mac;
2593 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2596 /* get the port info from SFP cmd if not copper port */
2597 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2600 /* if IMP does not support getting SFP/qSFP info, return directly */
2601 if (!hdev->support_sfp_query)
2604 if (hdev->pdev->revision >= 0x21)
2605 ret = hclge_get_sfp_info(hdev, mac);
2607 ret = hclge_get_sfp_speed(hdev, &speed);
2609 if (ret == -EOPNOTSUPP) {
2610 hdev->support_sfp_query = false;
2616 if (hdev->pdev->revision >= 0x21) {
2617 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2618 hclge_update_port_capability(mac);
2621 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2624 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2625 return 0; /* do nothing if no SFP */
2627 /* must config full duplex for SFP */
2628 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2632 static int hclge_get_status(struct hnae3_handle *handle)
2634 struct hclge_vport *vport = hclge_get_vport(handle);
2635 struct hclge_dev *hdev = vport->back;
2637 hclge_update_link_status(hdev);
2639 return hdev->hw.mac.link;
2642 static void hclge_service_timer(struct timer_list *t)
2644 struct hclge_dev *hdev = from_timer(hdev, t, service_timer);
2646 mod_timer(&hdev->service_timer, jiffies + HZ);
2647 hdev->hw_stats.stats_timer++;
2648 hclge_task_schedule(hdev);
2651 static void hclge_service_complete(struct hclge_dev *hdev)
2653 WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state));
2655 /* Flush memory before next watchdog */
2656 smp_mb__before_atomic();
2657 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
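/* hclge_check_event_cause(): read the vector 0 reset, CMDQ and MSI-X status
 * registers and classify the interrupt. Reset events take priority over
 * mailbox events; *clearval returns the bits the caller must clear later.
 */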
2660 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2662 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2664 /* fetch the events from their corresponding regs */
2665 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2666 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2667 msix_src_reg = hclge_read_dev(&hdev->hw,
2668 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2670 /* Assumption: If by any chance reset and mailbox events are reported
2671 * together then we will only process reset event in this go and will
2672 * defer the processing of the mailbox events. Since we have not
2673 * cleared the RX CMDQ event this time, we will receive another
2674 * interrupt from H/W just for the mailbox.
2677 /* check for vector0 reset event sources */
2678 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2679 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2680 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2681 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2682 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2683 hdev->rst_stats.imp_rst_cnt++;
2684 return HCLGE_VECTOR0_EVENT_RST;
2687 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2688 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2689 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2690 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2691 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2692 hdev->rst_stats.global_rst_cnt++;
2693 return HCLGE_VECTOR0_EVENT_RST;
2696 if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
2697 dev_info(&hdev->pdev->dev, "core reset interrupt\n");
2698 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2699 set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
2700 *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
2701 hdev->rst_stats.core_rst_cnt++;
2702 return HCLGE_VECTOR0_EVENT_RST;
2705 /* check for vector0 msix event source */
2706 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
2707 dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
2709 return HCLGE_VECTOR0_EVENT_ERR;
2712 /* check for vector0 mailbox(=CMDQ RX) event source */
2713 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
2714 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
2715 *clearval = cmdq_src_reg;
2716 return HCLGE_VECTOR0_EVENT_MBX;
2719 /* print other vector0 event source */
2720 dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
2721 cmdq_src_reg, msix_src_reg);
2722 return HCLGE_VECTOR0_EVENT_OTHER;
2725 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
2728 switch (event_type) {
2729 case HCLGE_VECTOR0_EVENT_RST:
2730 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
2732 case HCLGE_VECTOR0_EVENT_MBX:
2733 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
2740 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
2742 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
2743 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
2744 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
2745 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
2746 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
2749 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
2751 writel(enable ? 1 : 0, vector->addr);
2754 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
2756 struct hclge_dev *hdev = data;
2760 hclge_enable_vector(&hdev->misc_vector, false);
2761 event_cause = hclge_check_event_cause(hdev, &clearval);
2763 /* vector 0 interrupt is shared with reset and mailbox source events.*/
2764 switch (event_cause) {
2765 case HCLGE_VECTOR0_EVENT_ERR:
2766 /* we do not know what type of reset is required now. This could
2767 * only be decided after we fetch the type of errors which
2768 * caused this event. Therefore, we will do the following for now:
2769 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
2770 * have deferred the type of reset to be used.
2771 * 2. Schedule the reset service task.
2772 * 3. When service task receives HNAE3_UNKNOWN_RESET type it
2773 * will fetch the correct type of reset. This would be done
2774 * by first decoding the types of errors.
2776 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
2778 case HCLGE_VECTOR0_EVENT_RST:
2779 hclge_reset_task_schedule(hdev);
2781 case HCLGE_VECTOR0_EVENT_MBX:
2782 /* If we are here then,
2783 * 1. Either we are not handling any mbx task and we are not
2786 * 2. We could be handling a mbx task but nothing more is
2788 * In both cases, we should schedule mbx task as there are more
2789 * mbx messages reported by this interrupt.
2791 hclge_mbx_task_schedule(hdev);
2794 dev_warn(&hdev->pdev->dev,
2795 "received unknown or unhandled event of vector0\n");
2799 /* clear the source of the interrupt if it is not caused by reset */
2800 if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
2801 hclge_clear_event_cause(hdev, event_cause, clearval);
2802 hclge_enable_vector(&hdev->misc_vector, true);
2808 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
2810 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
2811 dev_warn(&hdev->pdev->dev,
2812 "vector(vector_id %d) has been freed.\n", vector_id);
2816 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
2817 hdev->num_msi_left += 1;
2818 hdev->num_msi_used -= 1;
2821 static void hclge_get_misc_vector(struct hclge_dev *hdev)
2823 struct hclge_misc_vector *vector = &hdev->misc_vector;
2825 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
2827 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
2828 hdev->vector_status[0] = 0;
2830 hdev->num_msi_left -= 1;
2831 hdev->num_msi_used += 1;
2834 static int hclge_misc_irq_init(struct hclge_dev *hdev)
2838 hclge_get_misc_vector(hdev);
2840 /* this would be explicitly freed in the end */
2841 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
2842 0, "hclge_misc", hdev);
2844 hclge_free_vector(hdev, 0);
2845 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
2846 hdev->misc_vector.vector_irq);
2852 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
2854 free_irq(hdev->misc_vector.vector_irq, hdev);
2855 hclge_free_vector(hdev, 0);
2858 int hclge_notify_client(struct hclge_dev *hdev,
2859 enum hnae3_reset_notify_type type)
2861 struct hnae3_client *client = hdev->nic_client;
2864 if (!client->ops->reset_notify)
2867 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2868 struct hnae3_handle *handle = &hdev->vport[i].nic;
2871 ret = client->ops->reset_notify(handle, type);
2873 dev_err(&hdev->pdev->dev,
2874 "notify nic client failed %d(%d)\n", type, ret);
2882 static int hclge_notify_roce_client(struct hclge_dev *hdev,
2883 enum hnae3_reset_notify_type type)
2885 struct hnae3_client *client = hdev->roce_client;
2892 if (!client->ops->reset_notify)
2895 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2896 struct hnae3_handle *handle = &hdev->vport[i].roce;
2898 ret = client->ops->reset_notify(handle, type);
2900 dev_err(&hdev->pdev->dev,
2901 "notify roce client failed %d(%d)",
2910 static int hclge_reset_wait(struct hclge_dev *hdev)
2912 #define HCLGE_RESET_WATI_MS 100
2913 #define HCLGE_RESET_WAIT_CNT 200
2914 u32 val, reg, reg_bit;
2917 switch (hdev->reset_type) {
2918 case HNAE3_IMP_RESET:
2919 reg = HCLGE_GLOBAL_RESET_REG;
2920 reg_bit = HCLGE_IMP_RESET_BIT;
2922 case HNAE3_GLOBAL_RESET:
2923 reg = HCLGE_GLOBAL_RESET_REG;
2924 reg_bit = HCLGE_GLOBAL_RESET_BIT;
2926 case HNAE3_CORE_RESET:
2927 reg = HCLGE_GLOBAL_RESET_REG;
2928 reg_bit = HCLGE_CORE_RESET_BIT;
2930 case HNAE3_FUNC_RESET:
2931 reg = HCLGE_FUN_RST_ING;
2932 reg_bit = HCLGE_FUN_RST_ING_B;
2934 case HNAE3_FLR_RESET:
2937 dev_err(&hdev->pdev->dev,
2938 "Wait for unsupported reset type: %d\n",
2943 if (hdev->reset_type == HNAE3_FLR_RESET) {
2944 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
2945 cnt++ < HCLGE_RESET_WAIT_CNT)
2946 msleep(HCLGE_RESET_WATI_MS);
2948 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
2949 dev_err(&hdev->pdev->dev,
2950 "flr wait timeout: %d\n", cnt);
2957 val = hclge_read_dev(&hdev->hw, reg);
2958 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
2959 msleep(HCLGE_RESET_WATI_MS);
2960 val = hclge_read_dev(&hdev->hw, reg);
2964 if (cnt >= HCLGE_RESET_WAIT_CNT) {
2965 dev_warn(&hdev->pdev->dev,
2966 "Wait for reset timeout: %d\n", hdev->reset_type);
2973 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
2975 struct hclge_vf_rst_cmd *req;
2976 struct hclge_desc desc;
2978 req = (struct hclge_vf_rst_cmd *)desc.data;
2979 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
2980 req->dest_vfid = func_id;
2985 return hclge_cmd_send(&hdev->hw, &desc, 1);
2988 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
2992 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
2993 struct hclge_vport *vport = &hdev->vport[i];
2996 /* Send cmd to set/clear VF's FUNC_RST_ING */
2997 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
2999 dev_err(&hdev->pdev->dev,
3000 "set vf(%d) rst failed %d!\n",
3001 vport->vport_id, ret);
3005 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3008 /* Inform VF to process the reset.
3009 * hclge_inform_reset_assert_to_vf may fail if VF
3010 * driver is not loaded.
3012 ret = hclge_inform_reset_assert_to_vf(vport);
3014 dev_warn(&hdev->pdev->dev,
3015 "inform reset to vf(%d) failed %d!\n",
3016 vport->vport_id, ret);
3022 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3024 struct hclge_desc desc;
3025 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3028 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3029 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3030 req->fun_reset_vfid = func_id;
3032 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3034 dev_err(&hdev->pdev->dev,
3035 "send function reset cmd fail, status =%d\n", ret);
3040 static void hclge_do_reset(struct hclge_dev *hdev)
3042 struct hnae3_handle *handle = &hdev->vport[0].nic;
3043 struct pci_dev *pdev = hdev->pdev;
3046 if (hclge_get_hw_reset_stat(handle)) {
3047 dev_info(&pdev->dev, "Hardware reset not finished\n");
3048 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3049 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3050 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3054 switch (hdev->reset_type) {
3055 case HNAE3_GLOBAL_RESET:
3056 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3057 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3058 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3059 dev_info(&pdev->dev, "Global Reset requested\n");
3061 case HNAE3_CORE_RESET:
3062 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3063 hnae3_set_bit(val, HCLGE_CORE_RESET_BIT, 1);
3064 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3065 dev_info(&pdev->dev, "Core Reset requested\n");
3067 case HNAE3_FUNC_RESET:
3068 dev_info(&pdev->dev, "PF Reset requested\n");
3069 /* schedule again to check later */
3070 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3071 hclge_reset_task_schedule(hdev);
3073 case HNAE3_FLR_RESET:
3074 dev_info(&pdev->dev, "FLR requested\n");
3075 /* schedule again to check later */
3076 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3077 hclge_reset_task_schedule(hdev);
3080 dev_warn(&pdev->dev,
3081 "Unsupported reset type: %d\n", hdev->reset_type);
3086 static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
3087 unsigned long *addr)
3089 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3091 /* first, resolve any unknown reset type to the known type(s) */
3092 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3093 /* we will intentionally ignore any errors from this function
3094 * as we will end up in *some* reset request in any case
3096 hclge_handle_hw_msix_error(hdev, addr);
3097 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3098 /* We deferred the clearing of the error event which caused
3099 * the interrupt since it was not possible to do that in
3100 * interrupt context (and this is the reason we introduced the
3101 * new UNKNOWN reset type). Now that the errors have been
3102 * handled and cleared in hardware, we can safely enable
3103 * interrupts. This is an exception to the norm.
3105 hclge_enable_vector(&hdev->misc_vector, true);
3108 /* return the highest priority reset level amongst all */
3109 if (test_bit(HNAE3_IMP_RESET, addr)) {
3110 rst_level = HNAE3_IMP_RESET;
3111 clear_bit(HNAE3_IMP_RESET, addr);
3112 clear_bit(HNAE3_GLOBAL_RESET, addr);
3113 clear_bit(HNAE3_CORE_RESET, addr);
3114 clear_bit(HNAE3_FUNC_RESET, addr);
3115 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3116 rst_level = HNAE3_GLOBAL_RESET;
3117 clear_bit(HNAE3_GLOBAL_RESET, addr);
3118 clear_bit(HNAE3_CORE_RESET, addr);
3119 clear_bit(HNAE3_FUNC_RESET, addr);
3120 } else if (test_bit(HNAE3_CORE_RESET, addr)) {
3121 rst_level = HNAE3_CORE_RESET;
3122 clear_bit(HNAE3_CORE_RESET, addr);
3123 clear_bit(HNAE3_FUNC_RESET, addr);
3124 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3125 rst_level = HNAE3_FUNC_RESET;
3126 clear_bit(HNAE3_FUNC_RESET, addr);
3127 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3128 rst_level = HNAE3_FLR_RESET;
3129 clear_bit(HNAE3_FLR_RESET, addr);
3132 if (hdev->reset_type != HNAE3_NONE_RESET &&
3133 rst_level < hdev->reset_type)
3134 return HNAE3_NONE_RESET;
3139 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3143 switch (hdev->reset_type) {
3144 case HNAE3_IMP_RESET:
3145 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3147 case HNAE3_GLOBAL_RESET:
3148 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3150 case HNAE3_CORE_RESET:
3151 clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
3160 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval);
3161 hclge_enable_vector(&hdev->misc_vector, true);
3164 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3168 switch (hdev->reset_type) {
3169 case HNAE3_FUNC_RESET:
3171 case HNAE3_FLR_RESET:
3172 ret = hclge_set_all_vf_rst(hdev, true);
3181 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3186 switch (hdev->reset_type) {
3187 case HNAE3_FUNC_RESET:
3188 /* There is no mechanism for the PF to know if the VF has stopped
3189 * IO; for now, just wait 100 ms for the VF to stop IO
3192 ret = hclge_func_reset_cmd(hdev, 0);
3194 dev_err(&hdev->pdev->dev,
3195 "asserting function reset fail %d!\n", ret);
3199 /* After performing the PF reset, it is not necessary to do any
3200 * mailbox handling or send any command to firmware, because
3201 * any mailbox handling or command to firmware is only valid
3202 * after hclge_cmd_init is called.
3204 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3205 hdev->rst_stats.pf_rst_cnt++;
3207 case HNAE3_FLR_RESET:
3208 /* There is no mechanism for the PF to know if the VF has stopped
3209 * IO; for now, just wait 100 ms for the VF to stop IO
3212 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3213 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3214 hdev->rst_stats.flr_rst_cnt++;
3216 case HNAE3_IMP_RESET:
3217 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3218 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3219 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3225 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3230 static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
3232 #define MAX_RESET_FAIL_CNT 5
3233 #define RESET_UPGRADE_DELAY_SEC 10
3235 if (hdev->reset_pending) {
3236 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3237 hdev->reset_pending);
3239 } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
3240 (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
3241 BIT(HCLGE_IMP_RESET_BIT))) {
3242 dev_info(&hdev->pdev->dev,
3243 "reset failed because IMP Reset is pending\n");
3244 hclge_clear_reset_cause(hdev);
3246 } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3247 hdev->reset_fail_cnt++;
3249 set_bit(hdev->reset_type, &hdev->reset_pending);
3250 dev_info(&hdev->pdev->dev,
3251 "re-schedule to wait for hw reset done\n");
3255 dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
3256 hclge_clear_reset_cause(hdev);
3257 mod_timer(&hdev->reset_timer,
3258 jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
3263 hclge_clear_reset_cause(hdev);
3264 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3268 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3272 switch (hdev->reset_type) {
3273 case HNAE3_FUNC_RESET:
3275 case HNAE3_FLR_RESET:
3276 ret = hclge_set_all_vf_rst(hdev, false);
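/* hclge_reset(): top-level reset flow. Notify the RoCE and NIC clients to go
 * down, prepare and assert the reset, wait for hardware to finish, then
 * re-initialize the ae device and walk the clients back through uninit, init,
 * restore and up. On failure the error handler decides whether to re-schedule
 * the reset (possibly at a higher level) or report failure.
 */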
3285 static void hclge_reset(struct hclge_dev *hdev)
3287 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3288 bool is_timeout = false;
3291 /* Initialize ae_dev reset status as well, in case enet layer wants to
3292 * know if device is undergoing reset
3294 ae_dev->reset_type = hdev->reset_type;
3295 hdev->rst_stats.reset_cnt++;
3296 /* perform reset of the stack & ae device for a client */
3297 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3301 ret = hclge_reset_prepare_down(hdev);
3306 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3308 goto err_reset_lock;
3312 ret = hclge_reset_prepare_wait(hdev);
3316 if (hclge_reset_wait(hdev)) {
3321 hdev->rst_stats.hw_reset_done_cnt++;
3323 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3328 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3330 goto err_reset_lock;
3332 ret = hclge_reset_ae_dev(hdev->ae_dev);
3334 goto err_reset_lock;
3336 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3338 goto err_reset_lock;
3340 ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3342 goto err_reset_lock;
3344 hclge_clear_reset_cause(hdev);
3346 ret = hclge_reset_prepare_up(hdev);
3348 goto err_reset_lock;
3350 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3352 goto err_reset_lock;
3356 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3360 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3364 hdev->last_reset_time = jiffies;
3365 hdev->reset_fail_cnt = 0;
3366 hdev->rst_stats.reset_done_cnt++;
3367 ae_dev->reset_type = HNAE3_NONE_RESET;
3368 del_timer(&hdev->reset_timer);
3375 if (hclge_reset_err_handle(hdev, is_timeout))
3376 hclge_reset_task_schedule(hdev);
3379 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3381 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3382 struct hclge_dev *hdev = ae_dev->priv;
3384 /* We might end up getting called broadly because of the two cases below:
3385 * 1. A recoverable error was conveyed through APEI and the only way to
3386 * bring normalcy is to reset.
3387 * 2. A new reset request from the stack due to a timeout.
3389 * For the first case, the error event might not have an ae handle
3390 * available. Check if this is a new reset request and we are not here
3391 * just because the last reset attempt did not succeed and the watchdog
3392 * hit us again. We know this if the last reset request did not occur
3393 * very recently (watchdog timer = 5*HZ; check after a sufficiently
3394 * large time, say 4*5*HZ). In case of a new request we reset the
3395 * "reset level" to PF reset. And if it is a repeat reset request of
3396 * the most recent one then we want to throttle the reset request.
3397 * Therefore, we will not allow it again before 3*HZ.
3400 handle = &hdev->vport[0].nic;
3402 if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
3404 else if (hdev->default_reset_request)
3406 hclge_get_reset_level(hdev,
3407 &hdev->default_reset_request);
3408 else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
3409 hdev->reset_level = HNAE3_FUNC_RESET;
3411 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d",
3414 /* request reset & schedule reset task */
3415 set_bit(hdev->reset_level, &hdev->reset_request);
3416 hclge_reset_task_schedule(hdev);
3418 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3419 hdev->reset_level++;
3422 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3423 enum hnae3_reset_type rst_type)
3425 struct hclge_dev *hdev = ae_dev->priv;
3427 set_bit(rst_type, &hdev->default_reset_request);
3430 static void hclge_reset_timer(struct timer_list *t)
3432 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3434 dev_info(&hdev->pdev->dev,
3435 "triggering global reset in reset timer\n");
3436 set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
3437 hclge_reset_event(hdev->pdev, NULL);
3440 static void hclge_reset_subtask(struct hclge_dev *hdev)
3442 /* check if there is any ongoing reset in the hardware. This status can
3443 * be checked from reset_pending. If there is one, we need to wait for
3444 * the hardware to complete the reset.
3445 * a. If we are able to figure out in reasonable time that the hardware
3446 * has fully reset, then we can proceed with the driver and client
3448 * b. else, we can come back later to check this status so re-schedule
3451 hdev->last_reset_time = jiffies;
3452 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
3453 if (hdev->reset_type != HNAE3_NONE_RESET)
3456 /* check if we got any *new* reset requests to be honored */
3457 hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_request);
3458 if (hdev->reset_type != HNAE3_NONE_RESET)
3459 hclge_do_reset(hdev);
3461 hdev->reset_type = HNAE3_NONE_RESET;
3464 static void hclge_reset_service_task(struct work_struct *work)
3466 struct hclge_dev *hdev =
3467 container_of(work, struct hclge_dev, rst_service_task);
3469 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3472 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
3474 hclge_reset_subtask(hdev);
3476 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3479 static void hclge_mailbox_service_task(struct work_struct *work)
3481 struct hclge_dev *hdev =
3482 container_of(work, struct hclge_dev, mbx_service_task);
3484 if (test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3487 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
3489 hclge_mbx_handler(hdev);
3491 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3494 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3498 /* start from vport 1 because the PF (vport 0) is always alive */
3499 for (i = 1; i < hdev->num_alloc_vport; i++) {
3500 struct hclge_vport *vport = &hdev->vport[i];
3502 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3503 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3505 /* If the VF is not alive, restore its MPS to the default value */
3506 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3507 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3511 static void hclge_service_task(struct work_struct *work)
3513 struct hclge_dev *hdev =
3514 container_of(work, struct hclge_dev, service_task);
3516 if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) {
3517 hclge_update_stats_for_all(hdev);
3518 hdev->hw_stats.stats_timer = 0;
3521 hclge_update_port_info(hdev);
3522 hclge_update_link_status(hdev);
3523 hclge_update_vport_alive(hdev);
3524 hclge_service_complete(hdev);
3527 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
3529 /* VF handle has no client */
3530 if (!handle->client)
3531 return container_of(handle, struct hclge_vport, nic);
3532 else if (handle->client->type == HNAE3_CLIENT_ROCE)
3533 return container_of(handle, struct hclge_vport, roce);
3535 return container_of(handle, struct hclge_vport, nic);
3538 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
3539 struct hnae3_vector_info *vector_info)
3541 struct hclge_vport *vport = hclge_get_vport(handle);
3542 struct hnae3_vector_info *vector = vector_info;
3543 struct hclge_dev *hdev = vport->back;
3547 vector_num = min(hdev->num_msi_left, vector_num);
3549 for (j = 0; j < vector_num; j++) {
3550 for (i = 1; i < hdev->num_msi; i++) {
3551 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
3552 vector->vector = pci_irq_vector(hdev->pdev, i);
3553 vector->io_addr = hdev->hw.io_base +
3554 HCLGE_VECTOR_REG_BASE +
3555 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
3557 HCLGE_VECTOR_VF_OFFSET;
3558 hdev->vector_status[i] = vport->vport_id;
3559 hdev->vector_irq[i] = vector->vector;
3568 hdev->num_msi_left -= alloc;
3569 hdev->num_msi_used += alloc;
3574 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
3578 for (i = 0; i < hdev->num_msi; i++)
3579 if (vector == hdev->vector_irq[i])
3585 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
3587 struct hclge_vport *vport = hclge_get_vport(handle);
3588 struct hclge_dev *hdev = vport->back;
3591 vector_id = hclge_get_vector_index(hdev, vector);
3592 if (vector_id < 0) {
3593 dev_err(&hdev->pdev->dev,
3594 "Get vector index fail. vector_id =%d\n", vector_id);
3598 hclge_free_vector(hdev, vector_id);
3603 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
3605 return HCLGE_RSS_KEY_SIZE;
3608 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
3610 return HCLGE_RSS_IND_TBL_SIZE;
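/* hclge_set_rss_algo_key(): program the RSS hash algorithm and hash key.
 * The key is written in chunks across several HCLGE_OPC_RSS_GENERIC_CONFIG
 * descriptors, each carrying the algorithm and the key offset of its chunk.
 */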
3613 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
3614 const u8 hfunc, const u8 *key)
3616 struct hclge_rss_config_cmd *req;
3617 struct hclge_desc desc;
3622 req = (struct hclge_rss_config_cmd *)desc.data;
3624 for (key_offset = 0; key_offset < 3; key_offset++) {
3625 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
3628 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
3629 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
3631 if (key_offset == 2)
3633 HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2;
3635 key_size = HCLGE_RSS_HASH_KEY_NUM;
3637 memcpy(req->hash_key,
3638 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
3640 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3642 dev_err(&hdev->pdev->dev,
3643 "Configure RSS config fail, status = %d\n",
3651 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
3653 struct hclge_rss_indirection_table_cmd *req;
3654 struct hclge_desc desc;
3658 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
3660 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
3661 hclge_cmd_setup_basic_desc
3662 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
3664 req->start_table_index =
3665 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
3666 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
3668 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
3669 req->rss_result[j] =
3670 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
3672 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3674 dev_err(&hdev->pdev->dev,
3675 "Configure rss indir table fail,status = %d\n",
3683 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
3684 u16 *tc_size, u16 *tc_offset)
3686 struct hclge_rss_tc_mode_cmd *req;
3687 struct hclge_desc desc;
3691 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
3692 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
3694 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
3697 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
3698 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
3699 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
3700 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
3701 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
3703 req->rss_tc_mode[i] = cpu_to_le16(mode);
3706 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3708 dev_err(&hdev->pdev->dev,
3709 "Configure rss tc mode fail, status = %d\n", ret);
3714 static void hclge_get_rss_type(struct hclge_vport *vport)
3716 if (vport->rss_tuple_sets.ipv4_tcp_en ||
3717 vport->rss_tuple_sets.ipv4_udp_en ||
3718 vport->rss_tuple_sets.ipv4_sctp_en ||
3719 vport->rss_tuple_sets.ipv6_tcp_en ||
3720 vport->rss_tuple_sets.ipv6_udp_en ||
3721 vport->rss_tuple_sets.ipv6_sctp_en)
3722 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
3723 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
3724 vport->rss_tuple_sets.ipv6_fragment_en)
3725 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
3727 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
3730 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
3732 struct hclge_rss_input_tuple_cmd *req;
3733 struct hclge_desc desc;
3736 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3738 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3740 /* Get the tuple config from the PF (vport 0) */
3741 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
3742 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
3743 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
3744 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
3745 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
3746 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
3747 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
3748 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
3749 hclge_get_rss_type(&hdev->vport[0]);
3750 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3752 dev_err(&hdev->pdev->dev,
3753 "Configure rss input fail, status = %d\n", ret);
3757 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
3760 struct hclge_vport *vport = hclge_get_vport(handle);
3763 /* Get hash algorithm */
3765 switch (vport->rss_algo) {
3766 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
3767 *hfunc = ETH_RSS_HASH_TOP;
3769 case HCLGE_RSS_HASH_ALGO_SIMPLE:
3770 *hfunc = ETH_RSS_HASH_XOR;
3773 *hfunc = ETH_RSS_HASH_UNKNOWN;
3778 /* Get the RSS Key required by the user */
3780 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
3782 /* Get indirect table */
3784 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3785 indir[i] = vport->rss_indirection_tbl[i];
3790 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
3791 const u8 *key, const u8 hfunc)
3793 struct hclge_vport *vport = hclge_get_vport(handle);
3794 struct hclge_dev *hdev = vport->back;
3798 /* Set the RSS hash key if specified by the user */
3801 case ETH_RSS_HASH_TOP:
3802 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
3804 case ETH_RSS_HASH_XOR:
3805 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
3807 case ETH_RSS_HASH_NO_CHANGE:
3808 hash_algo = vport->rss_algo;
3814 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
3818 /* Update the shadow RSS key with the user specified key */
3819 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
3820 vport->rss_algo = hash_algo;
3823 /* Update the shadow RSS table with user specified qids */
3824 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
3825 vport->rss_indirection_tbl[i] = indir[i];
3827 /* Update the hardware */
3828 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
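/* hclge_get_rss_hash_bits(): translate the ethtool RXH_* flags in @nfc into
 * the device's tuple-enable bits (source/destination port and IP). SCTP
 * flows additionally hash on the verification tag.
 */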
3831 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
3833 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
3835 if (nfc->data & RXH_L4_B_2_3)
3836 hash_sets |= HCLGE_D_PORT_BIT;
3838 hash_sets &= ~HCLGE_D_PORT_BIT;
3840 if (nfc->data & RXH_IP_SRC)
3841 hash_sets |= HCLGE_S_IP_BIT;
3843 hash_sets &= ~HCLGE_S_IP_BIT;
3845 if (nfc->data & RXH_IP_DST)
3846 hash_sets |= HCLGE_D_IP_BIT;
3848 hash_sets &= ~HCLGE_D_IP_BIT;
3850 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
3851 hash_sets |= HCLGE_V_TAG_BIT;
3856 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
3857 struct ethtool_rxnfc *nfc)
3859 struct hclge_vport *vport = hclge_get_vport(handle);
3860 struct hclge_dev *hdev = vport->back;
3861 struct hclge_rss_input_tuple_cmd *req;
3862 struct hclge_desc desc;
3866 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
3867 RXH_L4_B_0_1 | RXH_L4_B_2_3))
3870 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
3871 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
3873 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
3874 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
3875 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
3876 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
3877 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
3878 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
3879 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
3880 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
3882 tuple_sets = hclge_get_rss_hash_bits(nfc);
3883 switch (nfc->flow_type) {
3885 req->ipv4_tcp_en = tuple_sets;
3888 req->ipv6_tcp_en = tuple_sets;
3891 req->ipv4_udp_en = tuple_sets;
3894 req->ipv6_udp_en = tuple_sets;
3897 req->ipv4_sctp_en = tuple_sets;
3900 if ((nfc->data & RXH_L4_B_0_1) ||
3901 (nfc->data & RXH_L4_B_2_3))
3904 req->ipv6_sctp_en = tuple_sets;
3907 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3910 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
3916 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3918 dev_err(&hdev->pdev->dev,
3919 "Set rss tuple fail, status = %d\n", ret);
3923 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
3924 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
3925 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
3926 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
3927 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
3928 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
3929 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
3930 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
3931 hclge_get_rss_type(vport);
3935 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
3936 struct ethtool_rxnfc *nfc)
3938 struct hclge_vport *vport = hclge_get_vport(handle);
3943 switch (nfc->flow_type) {
3945 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
3948 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
3951 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
3954 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
3957 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
3960 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
3964 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
3973 if (tuple_sets & HCLGE_D_PORT_BIT)
3974 nfc->data |= RXH_L4_B_2_3;
3975 if (tuple_sets & HCLGE_S_PORT_BIT)
3976 nfc->data |= RXH_L4_B_0_1;
3977 if (tuple_sets & HCLGE_D_IP_BIT)
3978 nfc->data |= RXH_IP_DST;
3979 if (tuple_sets & HCLGE_S_IP_BIT)
3980 nfc->data |= RXH_IP_SRC;
3985 static int hclge_get_tc_size(struct hnae3_handle *handle)
3987 struct hclge_vport *vport = hclge_get_vport(handle);
3988 struct hclge_dev *hdev = vport->back;
3990 return hdev->rss_size_max;
3993 int hclge_rss_init_hw(struct hclge_dev *hdev)
3995 struct hclge_vport *vport = hdev->vport;
3996 u8 *rss_indir = vport[0].rss_indirection_tbl;
3997 u16 rss_size = vport[0].alloc_rss_size;
3998 u8 *key = vport[0].rss_hash_key;
3999 u8 hfunc = vport[0].rss_algo;
4000 u16 tc_offset[HCLGE_MAX_TC_NUM];
4001 u16 tc_valid[HCLGE_MAX_TC_NUM];
4002 u16 tc_size[HCLGE_MAX_TC_NUM];
4006 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4010 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4014 ret = hclge_set_rss_input_tuple(hdev);
4018 /* Each TC has the same queue size, and the tc_size set to hardware is
4019 * the log2 of the roundup power of two of rss_size; the actual queue
4020 * size is limited by the indirection table.
4022 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4023 dev_err(&hdev->pdev->dev,
4024 "Configure rss tc size failed, invalid TC_SIZE = %d\n",
4029 roundup_size = roundup_pow_of_two(rss_size);
4030 roundup_size = ilog2(roundup_size);
4032 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4035 if (!(hdev->hw_tc_map & BIT(i)))
4039 tc_size[i] = roundup_size;
4040 tc_offset[i] = rss_size * i;
4043 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
4046 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4048 struct hclge_vport *vport = hdev->vport;
4051 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4052 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4053 vport[j].rss_indirection_tbl[i] =
4054 i % vport[j].alloc_rss_size;
4058 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4060 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4061 struct hclge_vport *vport = hdev->vport;
4063 if (hdev->pdev->revision >= 0x21)
4064 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4066 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4067 vport[i].rss_tuple_sets.ipv4_tcp_en =
4068 HCLGE_RSS_INPUT_TUPLE_OTHER;
4069 vport[i].rss_tuple_sets.ipv4_udp_en =
4070 HCLGE_RSS_INPUT_TUPLE_OTHER;
4071 vport[i].rss_tuple_sets.ipv4_sctp_en =
4072 HCLGE_RSS_INPUT_TUPLE_SCTP;
4073 vport[i].rss_tuple_sets.ipv4_fragment_en =
4074 HCLGE_RSS_INPUT_TUPLE_OTHER;
4075 vport[i].rss_tuple_sets.ipv6_tcp_en =
4076 HCLGE_RSS_INPUT_TUPLE_OTHER;
4077 vport[i].rss_tuple_sets.ipv6_udp_en =
4078 HCLGE_RSS_INPUT_TUPLE_OTHER;
4079 vport[i].rss_tuple_sets.ipv6_sctp_en =
4080 HCLGE_RSS_INPUT_TUPLE_SCTP;
4081 vport[i].rss_tuple_sets.ipv6_fragment_en =
4082 HCLGE_RSS_INPUT_TUPLE_OTHER;
4084 vport[i].rss_algo = rss_algo;
4086 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4087 HCLGE_RSS_KEY_SIZE);
4090 hclge_rss_indir_init_cfg(hdev);
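/* hclge_bind_ring_with_vector(): map (@en true) or unmap (@en false) every
 * ring in @ring_chain to @vector_id. Ring entries are packed into a command
 * descriptor, which is flushed to firmware whenever it fills up or the end
 * of the chain is reached.
 */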
4093 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4094 int vector_id, bool en,
4095 struct hnae3_ring_chain_node *ring_chain)
4097 struct hclge_dev *hdev = vport->back;
4098 struct hnae3_ring_chain_node *node;
4099 struct hclge_desc desc;
4100 struct hclge_ctrl_vector_chain_cmd *req
4101 = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4102 enum hclge_cmd_status status;
4103 enum hclge_opcode_type op;
4104 u16 tqp_type_and_id;
4107 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4108 hclge_cmd_setup_basic_desc(&desc, op, false);
4109 req->int_vector_id = vector_id;
4112 for (node = ring_chain; node; node = node->next) {
4113 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4114 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4116 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4117 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4118 HCLGE_TQP_ID_S, node->tqp_index);
4119 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4121 hnae3_get_field(node->int_gl_idx,
4122 HNAE3_RING_GL_IDX_M,
4123 HNAE3_RING_GL_IDX_S));
4124 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4125 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4126 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4127 req->vfid = vport->vport_id;
4129 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4131 dev_err(&hdev->pdev->dev,
4132 "Map TQP fail, status is %d.\n",
4138 hclge_cmd_setup_basic_desc(&desc,
4141 req->int_vector_id = vector_id;
4146 req->int_cause_num = i;
4147 req->vfid = vport->vport_id;
4148 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4150 dev_err(&hdev->pdev->dev,
4151 "Map TQP fail, status is %d.\n", status);
4159 static int hclge_map_ring_to_vector(struct hnae3_handle *handle,
4161 struct hnae3_ring_chain_node *ring_chain)
4163 struct hclge_vport *vport = hclge_get_vport(handle);
4164 struct hclge_dev *hdev = vport->back;
4167 vector_id = hclge_get_vector_index(hdev, vector);
4168 if (vector_id < 0) {
4169 dev_err(&hdev->pdev->dev,
4170 "Get vector index fail. vector_id =%d\n", vector_id);
4174 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4177 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle,
4179 struct hnae3_ring_chain_node *ring_chain)
4181 struct hclge_vport *vport = hclge_get_vport(handle);
4182 struct hclge_dev *hdev = vport->back;
4185 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4188 vector_id = hclge_get_vector_index(hdev, vector);
4189 if (vector_id < 0) {
4190 dev_err(&handle->pdev->dev,
4191 "Get vector index fail. ret =%d\n", vector_id);
4195 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4197 dev_err(&handle->pdev->dev,
4198 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4205 int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4206 struct hclge_promisc_param *param)
4208 struct hclge_promisc_cfg_cmd *req;
4209 struct hclge_desc desc;
4212 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4214 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4215 req->vf_id = param->vf_id;
4217 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4218 * pdev revision 0x20; newer revisions support them. Setting
4219 * these two fields does not return an error when the driver
4220 * sends the command to firmware on revision 0x20.
4222 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4223 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4225 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4227 dev_err(&hdev->pdev->dev,
4228 "Set promisc mode fail, status is %d.\n", ret);
4233 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
4234 bool en_mc, bool en_bc, int vport_id)
4239 memset(param, 0, sizeof(struct hclge_promisc_param));
4241 param->enable = HCLGE_PROMISC_EN_UC;
4243 param->enable |= HCLGE_PROMISC_EN_MC;
4245 param->enable |= HCLGE_PROMISC_EN_BC;
4246 param->vf_id = vport_id;
4249 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4252 struct hclge_vport *vport = hclge_get_vport(handle);
4253 struct hclge_dev *hdev = vport->back;
4254 struct hclge_promisc_param param;
4255 bool en_bc_pmc = true;
4257 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter
4258 * is always bypassed. So broadcast promisc should be disabled until
4259 * the user enables promisc mode
4261 if (handle->pdev->revision == 0x20)
4262 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4264 hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4266 return hclge_cmd_set_promisc_mode(hdev, ¶m);
4269 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4271 struct hclge_get_fd_mode_cmd *req;
4272 struct hclge_desc desc;
4275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4277 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4279 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4281 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4285 *fd_mode = req->mode;
4290 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4291 u32 *stage1_entry_num,
4292 u32 *stage2_entry_num,
4293 u16 *stage1_counter_num,
4294 u16 *stage2_counter_num)
4296 struct hclge_get_fd_allocation_cmd *req;
4297 struct hclge_desc desc;
4300 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4302 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4304 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4306 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4311 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4312 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4313 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4314 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4319 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4321 struct hclge_set_fd_key_config_cmd *req;
4322 struct hclge_fd_key_cfg *stage;
4323 struct hclge_desc desc;
4326 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4328 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4329 stage = &hdev->fd_cfg.key_cfg[stage_num];
4330 req->stage = stage_num;
4331 req->key_select = stage->key_sel;
4332 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4333 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4334 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4335 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4336 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4337 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4339 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4341 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
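/* hclge_init_fd_config(): query the flow director mode and TCAM/counter
 * allocation from firmware, then build the stage 1 key configuration (tuple
 * and metadata selection) and write it to hardware.
 */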
4346 static int hclge_init_fd_config(struct hclge_dev *hdev)
4348 #define LOW_2_WORDS 0x03
4349 struct hclge_fd_key_cfg *key_cfg;
4352 if (!hnae3_dev_fd_supported(hdev))
4355 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4359 switch (hdev->fd_cfg.fd_mode) {
4360 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4361 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4363 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4364 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4367 dev_err(&hdev->pdev->dev,
4368 "Unsupported flow director mode %d\n",
4369 hdev->fd_cfg.fd_mode);
4373 hdev->fd_cfg.proto_support =
4374 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4375 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4376 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4377 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4378 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4379 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4380 key_cfg->outer_sipv6_word_en = 0;
4381 key_cfg->outer_dipv6_word_en = 0;
4383 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4384 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4385 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4386 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4388 /* If the max 400-bit key is used, we can also support ether-type tuples */
4389 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4390 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4391 key_cfg->tuple_active |=
4392 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4395 /* roce_type is used to filter RoCE frames;
4396 * dst_vport is used to specify the rule
4398 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4400 ret = hclge_get_fd_allocation(hdev,
4401 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4402 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4403 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4404 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4408 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
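/* hclge_fd_tcam_config(): write one flow director TCAM entry. The key is
 * split across three chained descriptors; @sel_x selects the x or y half of
 * the key and @is_add controls the entry-valid bit.
 */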
4411 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4412 int loc, u8 *key, bool is_add)
4414 struct hclge_fd_tcam_config_1_cmd *req1;
4415 struct hclge_fd_tcam_config_2_cmd *req2;
4416 struct hclge_fd_tcam_config_3_cmd *req3;
4417 struct hclge_desc desc[3];
4420 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4421 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4422 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4423 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4424 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4426 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4427 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4428 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4430 req1->stage = stage;
4431 req1->xy_sel = sel_x ? 1 : 0;
4432 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4433 req1->index = cpu_to_le32(loc);
4434 req1->entry_vld = sel_x ? is_add : 0;
4437 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4438 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4439 sizeof(req2->tcam_data));
4440 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4441 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4444 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4446 dev_err(&hdev->pdev->dev,
4447 "config tcam key fail, ret=%d\n",
4453 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4454 struct hclge_fd_ad_data *action)
4456 struct hclge_fd_ad_config_cmd *req;
4457 struct hclge_desc desc;
4461 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4463 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4464 req->index = cpu_to_le32(loc);
4467 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4468 action->write_rule_id_to_bd);
4469 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
4472 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
4473 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
4474 action->forward_to_direct_queue);
4475 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
4477 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
4478 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
4479 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
4480 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
4481 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
4482 action->counter_id);
4484 req->ad_data = cpu_to_le64(ad_data);
4485 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4487 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
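/* Each active tuple is converted into the X/Y halves of the ternary TCAM
 * key via the calc_x()/calc_y() helpers defined earlier in this file,
 * which combine a tuple value with its mask; the intent is that bits the
 * user masked off match any value. MAC addresses are stored byte-reversed
 * and 16/32-bit fields are converted to little endian before being
 * written into the key buffer.
 */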
4492 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
4493 struct hclge_fd_rule *rule)
4495 u16 tmp_x_s, tmp_y_s;
4496 u32 tmp_x_l, tmp_y_l;
4499 if (rule->unused_tuple & tuple_bit)
4502 switch (tuple_bit) {
4505 case BIT(INNER_DST_MAC):
4506 for (i = 0; i < 6; i++) {
4507 calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
4508 rule->tuples_mask.dst_mac[i]);
4509 calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
4510 rule->tuples_mask.dst_mac[i]);
4514 case BIT(INNER_SRC_MAC):
4515 for (i = 0; i < 6; i++) {
4516 calc_x(key_x[5 - i], rule->tuples.src_mac[i],
4517 rule->tuples_mask.src_mac[i]);
4518 calc_y(key_y[5 - i], rule->tuples.src_mac[i],
4519 rule->tuples_mask.src_mac[i]);
4523 case BIT(INNER_VLAN_TAG_FST):
4524 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
4525 rule->tuples_mask.vlan_tag1);
4526 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
4527 rule->tuples_mask.vlan_tag1);
4528 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4529 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4532 case BIT(INNER_ETH_TYPE):
4533 calc_x(tmp_x_s, rule->tuples.ether_proto,
4534 rule->tuples_mask.ether_proto);
4535 calc_y(tmp_y_s, rule->tuples.ether_proto,
4536 rule->tuples_mask.ether_proto);
4537 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4538 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4541 case BIT(INNER_IP_TOS):
4542 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4543 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
4546 case BIT(INNER_IP_PROTO):
4547 calc_x(*key_x, rule->tuples.ip_proto,
4548 rule->tuples_mask.ip_proto);
4549 calc_y(*key_y, rule->tuples.ip_proto,
4550 rule->tuples_mask.ip_proto);
4553 case BIT(INNER_SRC_IP):
4554 calc_x(tmp_x_l, rule->tuples.src_ip[3],
4555 rule->tuples_mask.src_ip[3]);
4556 calc_y(tmp_y_l, rule->tuples.src_ip[3],
4557 rule->tuples_mask.src_ip[3]);
4558 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4559 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4562 case BIT(INNER_DST_IP):
4563 calc_x(tmp_x_l, rule->tuples.dst_ip[3],
4564 rule->tuples_mask.dst_ip[3]);
4565 calc_y(tmp_y_l, rule->tuples.dst_ip[3],
4566 rule->tuples_mask.dst_ip[3]);
4567 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
4568 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
4571 case BIT(INNER_SRC_PORT):
4572 calc_x(tmp_x_s, rule->tuples.src_port,
4573 rule->tuples_mask.src_port);
4574 calc_y(tmp_y_s, rule->tuples.src_port,
4575 rule->tuples_mask.src_port);
4576 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4577 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
4580 case BIT(INNER_DST_PORT):
4581 calc_x(tmp_x_s, rule->tuples.dst_port,
4582 rule->tuples_mask.dst_port);
4583 calc_y(tmp_y_s, rule->tuples.dst_port,
4584 rule->tuples_mask.dst_port);
4585 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
4586 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
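/* Builds the "port number" meta-data field: for HOST_PORT it packs the
 * pf_id/vf_id together with the port type bit, otherwise the physical
 * network port id is used.
 */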
4594 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
4595 u8 vf_id, u8 network_port_id)
4597 u32 port_number = 0;
4599 if (port_type == HOST_PORT) {
4600 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
4602 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
4604 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
4606 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
4607 HCLGE_NETWORK_PORT_ID_S, network_port_id);
4608 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
4614 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
4615 __le32 *key_x, __le32 *key_y,
4616 struct hclge_fd_rule *rule)
4618 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
4619 u8 cur_pos = 0, tuple_size, shift_bits;
4622 for (i = 0; i < MAX_META_DATA; i++) {
4623 tuple_size = meta_data_key_info[i].key_length;
4624 tuple_bit = key_cfg->meta_data_active & BIT(i);
4626 switch (tuple_bit) {
4627 case BIT(ROCE_TYPE):
4628 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
4629 cur_pos += tuple_size;
4631 case BIT(DST_VPORT):
4632 port_number = hclge_get_port_number(HOST_PORT, 0,
4634 hnae3_set_field(meta_data,
4635 GENMASK(cur_pos + tuple_size, cur_pos),
4636 cur_pos, port_number);
4637 cur_pos += tuple_size;
4644 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
4645 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
4646 shift_bits = sizeof(meta_data) * 8 - cur_pos;
4648 *key_x = cpu_to_le32(tmp_x << shift_bits);
4649 *key_y = cpu_to_le32(tmp_y << shift_bits);
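/* In hclge_fd_convert_meta_data() above, the meta-data bits are
 * accumulated from bit 0 upwards and then shifted left by
 * (32 - cur_pos), so the used bits end up flush against the MSB of the
 * 32-bit meta-data word before being converted into an X/Y pair.
 */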
4652 /* A complete key consists of a meta data key and a tuple key.
4653 * The meta data key is stored in the MSB region, the tuple key in the
4654 * LSB region, and unused bits are filled with 0.
4656 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
4657 struct hclge_fd_rule *rule)
4659 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
4660 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
4661 u8 *cur_key_x, *cur_key_y;
4662 int i, ret, tuple_size;
4663 u8 meta_data_region;
4665 memset(key_x, 0, sizeof(key_x));
4666 memset(key_y, 0, sizeof(key_y));
4670 for (i = 0; i < MAX_TUPLE; i++) {
4674 tuple_size = tuple_key_info[i].key_length / 8;
4675 check_tuple = key_cfg->tuple_active & BIT(i);
4677 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
4680 cur_key_x += tuple_size;
4681 cur_key_y += tuple_size;
4685 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
4686 MAX_META_DATA_LENGTH / 8;
4688 hclge_fd_convert_meta_data(key_cfg,
4689 (__le32 *)(key_x + meta_data_region),
4690 (__le32 *)(key_y + meta_data_region),
4693 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
4696 dev_err(&hdev->pdev->dev,
4697 "fd key_y config fail, loc=%d, ret=%d\n",
4698 rule->location, ret);
4702 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
4705 dev_err(&hdev->pdev->dev,
4706 "fd key_x config fail, loc=%d, ret=%d\n",
4707 rule->location, ret);
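/* The action data (AD) entry programmed below decides what happens on a
 * TCAM hit: drop the packet or steer it to a specific queue, optionally
 * writing the rule id into the RX buffer descriptor so software can tell
 * which rule matched.
 */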
4711 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
4712 struct hclge_fd_rule *rule)
4714 struct hclge_fd_ad_data ad_data;
4716 ad_data.ad_id = rule->location;
4718 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
4719 ad_data.drop_packet = true;
4720 ad_data.forward_to_direct_queue = false;
4721 ad_data.queue_id = 0;
4723 ad_data.drop_packet = false;
4724 ad_data.forward_to_direct_queue = true;
4725 ad_data.queue_id = rule->queue_id;
4728 ad_data.use_counter = false;
4729 ad_data.counter_id = 0;
4731 ad_data.use_next_stage = false;
4732 ad_data.next_input_key = 0;
4734 ad_data.write_rule_id_to_bd = true;
4735 ad_data.rule_id = rule->location;
4737 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
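/* Validates an ethtool flow spec against what the stage-1 key can match.
 * Tuples the user left as zero (wildcard) are collected in *unused so
 * that hclge_fd_convert_tuple() can skip them later.
 */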
4740 static int hclge_fd_check_spec(struct hclge_dev *hdev,
4741 struct ethtool_rx_flow_spec *fs, u32 *unused)
4743 struct ethtool_tcpip4_spec *tcp_ip4_spec;
4744 struct ethtool_usrip4_spec *usr_ip4_spec;
4745 struct ethtool_tcpip6_spec *tcp_ip6_spec;
4746 struct ethtool_usrip6_spec *usr_ip6_spec;
4747 struct ethhdr *ether_spec;
4749 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
4752 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
4755 if ((fs->flow_type & FLOW_EXT) &&
4756 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
4757 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
4761 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
4765 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
4766 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
4768 if (!tcp_ip4_spec->ip4src)
4769 *unused |= BIT(INNER_SRC_IP);
4771 if (!tcp_ip4_spec->ip4dst)
4772 *unused |= BIT(INNER_DST_IP);
4774 if (!tcp_ip4_spec->psrc)
4775 *unused |= BIT(INNER_SRC_PORT);
4777 if (!tcp_ip4_spec->pdst)
4778 *unused |= BIT(INNER_DST_PORT);
4780 if (!tcp_ip4_spec->tos)
4781 *unused |= BIT(INNER_IP_TOS);
4785 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
4786 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4787 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4789 if (!usr_ip4_spec->ip4src)
4790 *unused |= BIT(INNER_SRC_IP);
4792 if (!usr_ip4_spec->ip4dst)
4793 *unused |= BIT(INNER_DST_IP);
4795 if (!usr_ip4_spec->tos)
4796 *unused |= BIT(INNER_IP_TOS);
4798 if (!usr_ip4_spec->proto)
4799 *unused |= BIT(INNER_IP_PROTO);
4801 if (usr_ip4_spec->l4_4_bytes)
4804 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
4811 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
4812 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4815 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
4816 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
4817 *unused |= BIT(INNER_SRC_IP);
4819 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
4820 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
4821 *unused |= BIT(INNER_DST_IP);
4823 if (!tcp_ip6_spec->psrc)
4824 *unused |= BIT(INNER_SRC_PORT);
4826 if (!tcp_ip6_spec->pdst)
4827 *unused |= BIT(INNER_DST_PORT);
4829 if (tcp_ip6_spec->tclass)
4833 case IPV6_USER_FLOW:
4834 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
4835 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
4836 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
4837 BIT(INNER_DST_PORT);
4839 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
4840 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
4841 *unused |= BIT(INNER_SRC_IP);
4843 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
4844 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
4845 *unused |= BIT(INNER_DST_IP);
4847 if (!usr_ip6_spec->l4_proto)
4848 *unused |= BIT(INNER_IP_PROTO);
4850 if (usr_ip6_spec->tclass)
4853 if (usr_ip6_spec->l4_4_bytes)
4858 ether_spec = &fs->h_u.ether_spec;
4859 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4860 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
4861 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
4863 if (is_zero_ether_addr(ether_spec->h_source))
4864 *unused |= BIT(INNER_SRC_MAC);
4866 if (is_zero_ether_addr(ether_spec->h_dest))
4867 *unused |= BIT(INNER_DST_MAC);
4869 if (!ether_spec->h_proto)
4870 *unused |= BIT(INNER_ETH_TYPE);
4877 if ((fs->flow_type & FLOW_EXT)) {
4878 if (fs->h_ext.vlan_etype)
4880 if (!fs->h_ext.vlan_tci)
4881 *unused |= BIT(INNER_VLAN_TAG_FST);
4883 if (fs->m_ext.vlan_tci) {
4884 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
4888 *unused |= BIT(INNER_VLAN_TAG_FST);
4891 if (fs->flow_type & FLOW_MAC_EXT) {
4892 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
4895 if (is_zero_ether_addr(fs->h_ext.h_dest))
4896 *unused |= BIT(INNER_DST_MAC);
4898 *unused &= ~(BIT(INNER_DST_MAC));
4904 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
4906 struct hclge_fd_rule *rule = NULL;
4907 struct hlist_node *node2;
4909 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
4910 if (rule->location >= location)
4914 return rule && rule->location == location;
4917 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
4918 struct hclge_fd_rule *new_rule,
4922 struct hclge_fd_rule *rule = NULL, *parent = NULL;
4923 struct hlist_node *node2;
4925 if (is_add && !new_rule)
4928 hlist_for_each_entry_safe(rule, node2,
4929 &hdev->fd_rule_list, rule_node) {
4930 if (rule->location >= location)
4935 if (rule && rule->location == location) {
4936 hlist_del(&rule->rule_node);
4938 hdev->hclge_fd_rule_num--;
4943 } else if (!is_add) {
4944 dev_err(&hdev->pdev->dev,
4945 "delete fail, rule %d does not exist\n",
4950 INIT_HLIST_NODE(&new_rule->rule_node);
4953 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
4955 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
4957 hdev->hclge_fd_rule_num++;
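/* Copies an ethtool flow spec into the driver's hclge_fd_rule layout,
 * converting the big-endian ethtool fields to host order. For the
 * SCTP/TCP/UDP flow types the L4 protocol tuple is fixed up in the
 * second switch statement.
 */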
4962 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
4963 struct ethtool_rx_flow_spec *fs,
4964 struct hclge_fd_rule *rule)
4966 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
4968 switch (flow_type) {
4972 rule->tuples.src_ip[3] =
4973 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
4974 rule->tuples_mask.src_ip[3] =
4975 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
4977 rule->tuples.dst_ip[3] =
4978 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
4979 rule->tuples_mask.dst_ip[3] =
4980 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
4982 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
4983 rule->tuples_mask.src_port =
4984 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
4986 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
4987 rule->tuples_mask.dst_port =
4988 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
4990 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
4991 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
4993 rule->tuples.ether_proto = ETH_P_IP;
4994 rule->tuples_mask.ether_proto = 0xFFFF;
4998 rule->tuples.src_ip[3] =
4999 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5000 rule->tuples_mask.src_ip[3] =
5001 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5003 rule->tuples.dst_ip[3] =
5004 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5005 rule->tuples_mask.dst_ip[3] =
5006 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5008 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5009 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5011 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5012 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5014 rule->tuples.ether_proto = ETH_P_IP;
5015 rule->tuples_mask.ether_proto = 0xFFFF;
5021 be32_to_cpu_array(rule->tuples.src_ip,
5022 fs->h_u.tcp_ip6_spec.ip6src, 4);
5023 be32_to_cpu_array(rule->tuples_mask.src_ip,
5024 fs->m_u.tcp_ip6_spec.ip6src, 4);
5026 be32_to_cpu_array(rule->tuples.dst_ip,
5027 fs->h_u.tcp_ip6_spec.ip6dst, 4);
5028 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5029 fs->m_u.tcp_ip6_spec.ip6dst, 4);
5031 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5032 rule->tuples_mask.src_port =
5033 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5035 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5036 rule->tuples_mask.dst_port =
5037 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5039 rule->tuples.ether_proto = ETH_P_IPV6;
5040 rule->tuples_mask.ether_proto = 0xFFFF;
5043 case IPV6_USER_FLOW:
5044 be32_to_cpu_array(rule->tuples.src_ip,
5045 fs->h_u.usr_ip6_spec.ip6src, 4);
5046 be32_to_cpu_array(rule->tuples_mask.src_ip,
5047 fs->m_u.usr_ip6_spec.ip6src, 4);
5049 be32_to_cpu_array(rule->tuples.dst_ip,
5050 fs->h_u.usr_ip6_spec.ip6dst, 4);
5051 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5052 fs->m_u.usr_ip6_spec.ip6dst, 4);
5054 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5055 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5057 rule->tuples.ether_proto = ETH_P_IPV6;
5058 rule->tuples_mask.ether_proto = 0xFFFF;
5062 ether_addr_copy(rule->tuples.src_mac,
5063 fs->h_u.ether_spec.h_source);
5064 ether_addr_copy(rule->tuples_mask.src_mac,
5065 fs->m_u.ether_spec.h_source);
5067 ether_addr_copy(rule->tuples.dst_mac,
5068 fs->h_u.ether_spec.h_dest);
5069 ether_addr_copy(rule->tuples_mask.dst_mac,
5070 fs->m_u.ether_spec.h_dest);
5072 rule->tuples.ether_proto =
5073 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5074 rule->tuples_mask.ether_proto =
5075 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5082 switch (flow_type) {
5085 rule->tuples.ip_proto = IPPROTO_SCTP;
5086 rule->tuples_mask.ip_proto = 0xFF;
5090 rule->tuples.ip_proto = IPPROTO_TCP;
5091 rule->tuples_mask.ip_proto = 0xFF;
5095 rule->tuples.ip_proto = IPPROTO_UDP;
5096 rule->tuples_mask.ip_proto = 0xFF;
5102 if ((fs->flow_type & FLOW_EXT)) {
5103 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5104 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5107 if (fs->flow_type & FLOW_MAC_EXT) {
5108 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5109 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
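/* Adds a flow director rule from an ethtool flow spec. A hypothetical
 * user-space command that ends up here via the hns3 ethtool ops:
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.2 dst-port 80 \
 *           action 3 loc 5
 *
 * A ring_cookie of RX_CLS_FLOW_DISC (ethtool "action -1") means drop;
 * otherwise the cookie encodes the target queue and, in its upper bits,
 * an optional VF id. The interface name and values above are only
 * illustrative.
 */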
5115 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5116 struct ethtool_rxnfc *cmd)
5118 struct hclge_vport *vport = hclge_get_vport(handle);
5119 struct hclge_dev *hdev = vport->back;
5120 u16 dst_vport_id = 0, q_index = 0;
5121 struct ethtool_rx_flow_spec *fs;
5122 struct hclge_fd_rule *rule;
5127 if (!hnae3_dev_fd_supported(hdev))
5131 dev_warn(&hdev->pdev->dev,
5132 "Please enable flow director first\n");
5136 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5138 ret = hclge_fd_check_spec(hdev, fs, &unused);
5140 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5144 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5145 action = HCLGE_FD_ACTION_DROP_PACKET;
5147 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5148 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5151 if (vf > hdev->num_req_vfs) {
5152 dev_err(&hdev->pdev->dev,
5153 "Error: vf id (%d) > max vf num (%d)\n",
5154 vf, hdev->num_req_vfs);
5158 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5159 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5162 dev_err(&hdev->pdev->dev,
5163 "Error: queue id (%d) > max tqp num (%d)\n",
5168 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5172 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5176 ret = hclge_fd_get_tuple(hdev, fs, rule);
5180 rule->flow_type = fs->flow_type;
5182 rule->location = fs->location;
5183 rule->unused_tuple = unused;
5184 rule->vf_id = dst_vport_id;
5185 rule->queue_id = q_index;
5186 rule->action = action;
5188 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5192 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5196 ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
5207 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5208 struct ethtool_rxnfc *cmd)
5210 struct hclge_vport *vport = hclge_get_vport(handle);
5211 struct hclge_dev *hdev = vport->back;
5212 struct ethtool_rx_flow_spec *fs;
5215 if (!hnae3_dev_fd_supported(hdev))
5218 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5220 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5223 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5224 dev_err(&hdev->pdev->dev,
5225 "Delete fail, rule %d does not exist\n",
5230 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5231 fs->location, NULL, false);
5235 return hclge_fd_update_rule_list(hdev, NULL, fs->location,
5239 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5242 struct hclge_vport *vport = hclge_get_vport(handle);
5243 struct hclge_dev *hdev = vport->back;
5244 struct hclge_fd_rule *rule;
5245 struct hlist_node *node;
5247 if (!hnae3_dev_fd_supported(hdev))
5251 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5253 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5254 rule->location, NULL, false);
5255 hlist_del(&rule->rule_node);
5257 hdev->hclge_fd_rule_num--;
5260 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5262 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
5263 rule->location, NULL, false);
5267 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5269 struct hclge_vport *vport = hclge_get_vport(handle);
5270 struct hclge_dev *hdev = vport->back;
5271 struct hclge_fd_rule *rule;
5272 struct hlist_node *node;
5275 /* Return ok here, because reset error handling will check this
5276 * return value. If error is returned here, the reset process will fail.
*/
5279 if (!hnae3_dev_fd_supported(hdev))
5282 /* if fd is disabled, should not restore it when reset */
5286 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5287 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5289 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5292 dev_warn(&hdev->pdev->dev,
5293 "Restore rule %d failed, remove it\n",
5295 hlist_del(&rule->rule_node);
5297 hdev->hclge_fd_rule_num--;
5303 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5304 struct ethtool_rxnfc *cmd)
5306 struct hclge_vport *vport = hclge_get_vport(handle);
5307 struct hclge_dev *hdev = vport->back;
5309 if (!hnae3_dev_fd_supported(hdev))
5312 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5313 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5318 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5319 struct ethtool_rxnfc *cmd)
5321 struct hclge_vport *vport = hclge_get_vport(handle);
5322 struct hclge_fd_rule *rule = NULL;
5323 struct hclge_dev *hdev = vport->back;
5324 struct ethtool_rx_flow_spec *fs;
5325 struct hlist_node *node2;
5327 if (!hnae3_dev_fd_supported(hdev))
5330 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5332 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5333 if (rule->location >= fs->location)
5337 if (!rule || fs->location != rule->location)
5340 fs->flow_type = rule->flow_type;
5341 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5345 fs->h_u.tcp_ip4_spec.ip4src =
5346 cpu_to_be32(rule->tuples.src_ip[3]);
5347 fs->m_u.tcp_ip4_spec.ip4src =
5348 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5349 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5351 fs->h_u.tcp_ip4_spec.ip4dst =
5352 cpu_to_be32(rule->tuples.dst_ip[3]);
5353 fs->m_u.tcp_ip4_spec.ip4dst =
5354 rule->unused_tuple & BIT(INNER_DST_IP) ?
5355 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5357 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5358 fs->m_u.tcp_ip4_spec.psrc =
5359 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5360 0 : cpu_to_be16(rule->tuples_mask.src_port);
5362 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5363 fs->m_u.tcp_ip4_spec.pdst =
5364 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5365 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5367 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5368 fs->m_u.tcp_ip4_spec.tos =
5369 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5370 0 : rule->tuples_mask.ip_tos;
5374 fs->h_u.usr_ip4_spec.ip4src =
5375 cpu_to_be32(rule->tuples.src_ip[3]);
5376 fs->m_u.usr_ip4_spec.ip4src =
5377 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5378 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
5380 fs->h_u.usr_ip4_spec.ip4dst =
5381 cpu_to_be32(rule->tuples.dst_ip[3]);
5382 fs->m_u.usr_ip4_spec.ip4dst =
5383 rule->unused_tuple & BIT(INNER_DST_IP) ?
5384 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
5386 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5387 fs->m_u.usr_ip4_spec.tos =
5388 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5389 0 : rule->tuples_mask.ip_tos;
5391 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5392 fs->m_u.usr_ip4_spec.proto =
5393 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5394 0 : rule->tuples_mask.ip_proto;
5396 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5402 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5403 rule->tuples.src_ip, 4);
5404 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5405 memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
5407 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5408 rule->tuples_mask.src_ip, 4);
5410 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
5411 rule->tuples.dst_ip, 4);
5412 if (rule->unused_tuple & BIT(INNER_DST_IP))
5413 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5415 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
5416 rule->tuples_mask.dst_ip, 4);
5418 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5419 fs->m_u.tcp_ip6_spec.psrc =
5420 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5421 0 : cpu_to_be16(rule->tuples_mask.src_port);
5423 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5424 fs->m_u.tcp_ip6_spec.pdst =
5425 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5426 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5429 case IPV6_USER_FLOW:
5430 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
5431 rule->tuples.src_ip, 4);
5432 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5433 memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
5435 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
5436 rule->tuples_mask.src_ip, 4);
5438 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
5439 rule->tuples.dst_ip, 4);
5440 if (rule->unused_tuple & BIT(INNER_DST_IP))
5441 memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
5443 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
5444 rule->tuples_mask.dst_ip, 4);
5446 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
5447 fs->m_u.usr_ip6_spec.l4_proto =
5448 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5449 0 : rule->tuples_mask.ip_proto;
5453 ether_addr_copy(fs->h_u.ether_spec.h_source,
5454 rule->tuples.src_mac);
5455 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
5456 eth_zero_addr(fs->m_u.ether_spec.h_source);
5458 ether_addr_copy(fs->m_u.ether_spec.h_source,
5459 rule->tuples_mask.src_mac);
5461 ether_addr_copy(fs->h_u.ether_spec.h_dest,
5462 rule->tuples.dst_mac);
5463 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5464 eth_zero_addr(fs->m_u.ether_spec.h_dest);
5466 ether_addr_copy(fs->m_u.ether_spec.h_dest,
5467 rule->tuples_mask.dst_mac);
5469 fs->h_u.ether_spec.h_proto =
5470 cpu_to_be16(rule->tuples.ether_proto);
5471 fs->m_u.ether_spec.h_proto =
5472 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
5473 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
5480 if (fs->flow_type & FLOW_EXT) {
5481 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
5482 fs->m_ext.vlan_tci =
5483 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
5484 cpu_to_be16(VLAN_VID_MASK) :
5485 cpu_to_be16(rule->tuples_mask.vlan_tag1);
5488 if (fs->flow_type & FLOW_MAC_EXT) {
5489 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
5490 if (rule->unused_tuple & BIT(INNER_DST_MAC))
5491 eth_zero_addr(fs->m_ext.h_dest);
5493 ether_addr_copy(fs->m_ext.h_dest,
5494 rule->tuples_mask.dst_mac);
5497 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5498 fs->ring_cookie = RX_CLS_FLOW_DISC;
5502 fs->ring_cookie = rule->queue_id;
5503 vf_id = rule->vf_id;
5504 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
5505 fs->ring_cookie |= vf_id;
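/* Dumps the configured rules: fills rule_locs[] with their locations
 * (up to cmd->rule_cnt entries) and reports the stage-1 table size in
 * cmd->data.
 */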
5511 static int hclge_get_all_rules(struct hnae3_handle *handle,
5512 struct ethtool_rxnfc *cmd, u32 *rule_locs)
5514 struct hclge_vport *vport = hclge_get_vport(handle);
5515 struct hclge_dev *hdev = vport->back;
5516 struct hclge_fd_rule *rule;
5517 struct hlist_node *node2;
5520 if (!hnae3_dev_fd_supported(hdev))
5523 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5525 hlist_for_each_entry_safe(rule, node2,
5526 &hdev->fd_rule_list, rule_node) {
5527 if (cnt == cmd->rule_cnt)
5530 rule_locs[cnt] = rule->location;
5534 cmd->rule_cnt = cnt;
5539 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
5541 struct hclge_vport *vport = hclge_get_vport(handle);
5542 struct hclge_dev *hdev = vport->back;
5544 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
5545 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
5548 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
5550 struct hclge_vport *vport = hclge_get_vport(handle);
5551 struct hclge_dev *hdev = vport->back;
5553 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
5556 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
5558 struct hclge_vport *vport = hclge_get_vport(handle);
5559 struct hclge_dev *hdev = vport->back;
5561 return hdev->rst_stats.hw_reset_done_cnt;
5564 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
5566 struct hclge_vport *vport = hclge_get_vport(handle);
5567 struct hclge_dev *hdev = vport->back;
5569 hdev->fd_en = enable;
5571 hclge_del_all_fd_entries(handle, false);
5573 hclge_restore_fd_entries(handle);
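/* hclge_cfg_mac_mode() builds a single loop_en bitmap that switches MAC
 * TX/RX, padding, FCS handling and oversize truncation on or off
 * together; the 1588 and loopback bits are always cleared here and are
 * configured separately by the loopback helpers further below.
 */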
5576 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
5578 struct hclge_desc desc;
5579 struct hclge_config_mac_mode_cmd *req =
5580 (struct hclge_config_mac_mode_cmd *)desc.data;
5584 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
5585 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
5586 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
5587 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
5588 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
5589 hnae3_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
5590 hnae3_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
5591 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
5592 hnae3_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
5593 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
5594 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
5595 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
5596 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
5597 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
5598 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
5599 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5601 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5603 dev_err(&hdev->pdev->dev,
5604 "mac enable fail, ret =%d.\n", ret);
5607 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
5609 struct hclge_config_mac_mode_cmd *req;
5610 struct hclge_desc desc;
5614 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
5615 /* 1 Read out the MAC mode config at first */
5616 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
5617 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5619 dev_err(&hdev->pdev->dev,
5620 "mac loopback get fail, ret =%d.\n", ret);
5624 /* 2 Then setup the loopback flag */
5625 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
5626 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
5627 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
5628 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
5630 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
5632 /* 3 Config mac work mode with loopback flag
5633 * and its original configuration parameters
5635 hclge_cmd_reuse_desc(&desc, false);
5636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5638 dev_err(&hdev->pdev->dev,
5639 "mac loopback set fail, ret =%d.\n", ret);
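/* Serdes loopback is requested with one command descriptor and then
 * polled twice: first until the firmware reports the operation done
 * (HCLGE_SERDES_RETRY_NUM tries, HCLGE_SERDES_RETRY_MS apart), then
 * until the MAC link status matches the expected up/down state.
 */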
5643 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
5644 enum hnae3_loop loop_mode)
5646 #define HCLGE_SERDES_RETRY_MS 10
5647 #define HCLGE_SERDES_RETRY_NUM 100
5649 #define HCLGE_MAC_LINK_STATUS_MS 10
5650 #define HCLGE_MAC_LINK_STATUS_NUM 100
5651 #define HCLGE_MAC_LINK_STATUS_DOWN 0
5652 #define HCLGE_MAC_LINK_STATUS_UP 1
5654 struct hclge_serdes_lb_cmd *req;
5655 struct hclge_desc desc;
5656 int mac_link_ret = 0;
5660 req = (struct hclge_serdes_lb_cmd *)desc.data;
5661 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
5663 switch (loop_mode) {
5664 case HNAE3_LOOP_SERIAL_SERDES:
5665 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
5667 case HNAE3_LOOP_PARALLEL_SERDES:
5668 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
5671 dev_err(&hdev->pdev->dev,
5672 "unsupported serdes loopback mode %d\n", loop_mode);
5677 req->enable = loop_mode_b;
5678 req->mask = loop_mode_b;
5679 mac_link_ret = HCLGE_MAC_LINK_STATUS_UP;
5681 req->mask = loop_mode_b;
5682 mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN;
5685 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5687 dev_err(&hdev->pdev->dev,
5688 "serdes loopback set fail, ret = %d\n", ret);
5693 msleep(HCLGE_SERDES_RETRY_MS);
5694 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
5696 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5698 dev_err(&hdev->pdev->dev,
5699 "serdes loopback get, ret = %d\n", ret);
5702 } while (++i < HCLGE_SERDES_RETRY_NUM &&
5703 !(req->result & HCLGE_CMD_SERDES_DONE_B));
5705 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
5706 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
5708 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
5709 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
5713 hclge_cfg_mac_mode(hdev, en);
5717 /* serdes internal loopback, independent of the network cable. */
5718 msleep(HCLGE_MAC_LINK_STATUS_MS);
5719 ret = hclge_get_mac_link_status(hdev);
5720 if (ret == mac_link_ret)
5722 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
5724 dev_err(&hdev->pdev->dev, "config mac mode timeout\n");
5729 static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
5730 int stream_id, bool enable)
5732 struct hclge_desc desc;
5733 struct hclge_cfg_com_tqp_queue_cmd *req =
5734 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
5737 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
5738 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
5739 req->stream_id = cpu_to_le16(stream_id);
5740 req->enable |= enable << HCLGE_TQP_ENABLE_B;
5742 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5744 dev_err(&hdev->pdev->dev,
5745 "Tqp enable fail, status =%d.\n", ret);
5749 static int hclge_set_loopback(struct hnae3_handle *handle,
5750 enum hnae3_loop loop_mode, bool en)
5752 struct hclge_vport *vport = hclge_get_vport(handle);
5753 struct hnae3_knic_private_info *kinfo;
5754 struct hclge_dev *hdev = vport->back;
5757 switch (loop_mode) {
5758 case HNAE3_LOOP_APP:
5759 ret = hclge_set_app_loopback(hdev, en);
5761 case HNAE3_LOOP_SERIAL_SERDES:
5762 case HNAE3_LOOP_PARALLEL_SERDES:
5763 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
5767 dev_err(&hdev->pdev->dev,
5768 "loop_mode %d is not supported\n", loop_mode);
5775 kinfo = &vport->nic.kinfo;
5776 for (i = 0; i < kinfo->num_tqps; i++) {
5777 ret = hclge_tqp_enable(hdev, i, 0, en);
5785 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
5787 struct hclge_vport *vport = hclge_get_vport(handle);
5788 struct hnae3_knic_private_info *kinfo;
5789 struct hnae3_queue *queue;
5790 struct hclge_tqp *tqp;
5793 kinfo = &vport->nic.kinfo;
5794 for (i = 0; i < kinfo->num_tqps; i++) {
5795 queue = handle->kinfo.tqp[i];
5796 tqp = container_of(queue, struct hclge_tqp, q);
5797 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
5801 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
5803 struct hclge_vport *vport = hclge_get_vport(handle);
5804 struct hclge_dev *hdev = vport->back;
5807 mod_timer(&hdev->service_timer, jiffies + HZ);
5809 del_timer_sync(&hdev->service_timer);
5810 cancel_work_sync(&hdev->service_task);
5811 clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
5815 static int hclge_ae_start(struct hnae3_handle *handle)
5817 struct hclge_vport *vport = hclge_get_vport(handle);
5818 struct hclge_dev *hdev = vport->back;
5821 hclge_cfg_mac_mode(hdev, true);
5822 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
5823 hdev->hw.mac.link = 0;
5825 /* reset tqp stats */
5826 hclge_reset_tqp_stats(handle);
5828 hclge_mac_start_phy(hdev);
5833 static void hclge_ae_stop(struct hnae3_handle *handle)
5835 struct hclge_vport *vport = hclge_get_vport(handle);
5836 struct hclge_dev *hdev = vport->back;
5839 set_bit(HCLGE_STATE_DOWN, &hdev->state);
5841 /* If it is not PF reset, the firmware will disable the MAC,
5842 * so we only need to stop the PHY here.
5844 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
5845 hdev->reset_type != HNAE3_FUNC_RESET) {
5846 hclge_mac_stop_phy(hdev);
5850 for (i = 0; i < handle->kinfo.num_tqps; i++)
5851 hclge_reset_tqp(handle, i);
5854 hclge_cfg_mac_mode(hdev, false);
5856 hclge_mac_stop_phy(hdev);
5858 /* reset tqp stats */
5859 hclge_reset_tqp_stats(handle);
5860 hclge_update_link_status(hdev);
5863 int hclge_vport_start(struct hclge_vport *vport)
5865 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5866 vport->last_active_jiffies = jiffies;
5870 void hclge_vport_stop(struct hclge_vport *vport)
5872 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
5875 static int hclge_client_start(struct hnae3_handle *handle)
5877 struct hclge_vport *vport = hclge_get_vport(handle);
5879 return hclge_vport_start(vport);
5882 static void hclge_client_stop(struct hnae3_handle *handle)
5884 struct hclge_vport *vport = hclge_get_vport(handle);
5886 hclge_vport_stop(vport);
5889 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
5890 u16 cmdq_resp, u8 resp_code,
5891 enum hclge_mac_vlan_tbl_opcode op)
5893 struct hclge_dev *hdev = vport->back;
5894 int return_status = -EIO;
5897 dev_err(&hdev->pdev->dev,
5898 "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n",
5903 if (op == HCLGE_MAC_VLAN_ADD) {
5904 if ((!resp_code) || (resp_code == 1)) {
5906 } else if (resp_code == 2) {
5907 return_status = -ENOSPC;
5908 dev_err(&hdev->pdev->dev,
5909 "add mac addr failed for uc_overflow.\n");
5910 } else if (resp_code == 3) {
5911 return_status = -ENOSPC;
5912 dev_err(&hdev->pdev->dev,
5913 "add mac addr failed for mc_overflow.\n");
5915 dev_err(&hdev->pdev->dev,
5916 "add mac addr failed for undefined, code=%d.\n",
5919 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
5922 } else if (resp_code == 1) {
5923 return_status = -ENOENT;
5924 dev_dbg(&hdev->pdev->dev,
5925 "remove mac addr failed for miss.\n");
5927 dev_err(&hdev->pdev->dev,
5928 "remove mac addr failed for undefined, code=%d.\n",
5931 } else if (op == HCLGE_MAC_VLAN_LKUP) {
5934 } else if (resp_code == 1) {
5935 return_status = -ENOENT;
5936 dev_dbg(&hdev->pdev->dev,
5937 "lookup mac addr failed for miss.\n");
5939 dev_err(&hdev->pdev->dev,
5940 "lookup mac addr failed for undefined, code=%d.\n",
5944 return_status = -EINVAL;
5945 dev_err(&hdev->pdev->dev,
5946 "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n",
5950 return return_status;
5953 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
5958 if (vfid > 255 || vfid < 0)
5961 if (vfid >= 0 && vfid <= 191) {
5962 word_num = vfid / 32;
5963 bit_num = vfid % 32;
5965 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5967 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
5969 word_num = (vfid - 192) / 32;
5970 bit_num = vfid % 32;
5972 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
5974 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
5980 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
5982 #define HCLGE_DESC_NUMBER 3
5983 #define HCLGE_FUNC_NUMBER_PER_DESC 6
5986 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
5987 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
5988 if (desc[i].data[j])
5994 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
5995 const u8 *addr, bool is_mc)
5997 const unsigned char *mac_addr = addr;
5998 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
5999 (mac_addr[0]) | (mac_addr[1] << 8);
6000 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6002 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6004 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6005 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6008 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
6009 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
6012 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
6013 struct hclge_mac_vlan_tbl_entry_cmd *req)
6015 struct hclge_dev *hdev = vport->back;
6016 struct hclge_desc desc;
6021 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
6023 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6025 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6027 dev_err(&hdev->pdev->dev,
6028 "del mac addr failed for cmd_send, ret =%d.\n",
6032 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6033 retval = le16_to_cpu(desc.retval);
6035 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6036 HCLGE_MAC_VLAN_REMOVE);
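/* Multicast MAC/VLAN entries span three descriptors (the VF function
 * bitmap filled in by hclge_update_desc_vfid() lives in the extra ones),
 * so the lookup mirrors that layout: a chained three-descriptor read for
 * a multicast entry, a single descriptor otherwise.
 */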
6039 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
6040 struct hclge_mac_vlan_tbl_entry_cmd *req,
6041 struct hclge_desc *desc,
6044 struct hclge_dev *hdev = vport->back;
6049 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
6051 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6052 memcpy(desc[0].data,
6054 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6055 hclge_cmd_setup_basic_desc(&desc[1],
6056 HCLGE_OPC_MAC_VLAN_ADD,
6058 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6059 hclge_cmd_setup_basic_desc(&desc[2],
6060 HCLGE_OPC_MAC_VLAN_ADD,
6062 ret = hclge_cmd_send(&hdev->hw, desc, 3);
6064 memcpy(desc[0].data,
6066 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6067 ret = hclge_cmd_send(&hdev->hw, desc, 1);
6070 dev_err(&hdev->pdev->dev,
6071 "lookup mac addr failed for cmd_send, ret =%d.\n",
6075 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
6076 retval = le16_to_cpu(desc[0].retval);
6078 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
6079 HCLGE_MAC_VLAN_LKUP);
6082 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
6083 struct hclge_mac_vlan_tbl_entry_cmd *req,
6084 struct hclge_desc *mc_desc)
6086 struct hclge_dev *hdev = vport->back;
6093 struct hclge_desc desc;
6095 hclge_cmd_setup_basic_desc(&desc,
6096 HCLGE_OPC_MAC_VLAN_ADD,
6098 memcpy(desc.data, req,
6099 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6100 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6101 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6102 retval = le16_to_cpu(desc.retval);
6104 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6106 HCLGE_MAC_VLAN_ADD);
6108 hclge_cmd_reuse_desc(&mc_desc[0], false);
6109 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6110 hclge_cmd_reuse_desc(&mc_desc[1], false);
6111 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6112 hclge_cmd_reuse_desc(&mc_desc[2], false);
6113 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
6114 memcpy(mc_desc[0].data, req,
6115 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
6116 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
6117 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
6118 retval = le16_to_cpu(mc_desc[0].retval);
6120 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
6122 HCLGE_MAC_VLAN_ADD);
6126 dev_err(&hdev->pdev->dev,
6127 "add mac addr failed for cmd_send, ret =%d.\n",
6135 static int hclge_init_umv_space(struct hclge_dev *hdev)
6137 u16 allocated_size = 0;
6140 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
6145 if (allocated_size < hdev->wanted_umv_size)
6146 dev_warn(&hdev->pdev->dev,
6147 "Alloc umv space failed, want %d, get %d\n",
6148 hdev->wanted_umv_size, allocated_size);
6150 mutex_init(&hdev->umv_mutex);
6151 hdev->max_umv_size = allocated_size;
6152 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
6153 hdev->share_umv_size = hdev->priv_umv_size +
6154 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6159 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
6163 if (hdev->max_umv_size > 0) {
6164 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
6168 hdev->max_umv_size = 0;
6170 mutex_destroy(&hdev->umv_mutex);
6175 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
6176 u16 *allocated_size, bool is_alloc)
6178 struct hclge_umv_spc_alc_cmd *req;
6179 struct hclge_desc desc;
6182 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
6183 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
6184 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, !is_alloc);
6185 req->space_size = cpu_to_le32(space_size);
6187 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6189 dev_err(&hdev->pdev->dev,
6190 "%s umv space failed for cmd_send, ret =%d\n",
6191 is_alloc ? "allocate" : "free", ret);
6195 if (is_alloc && allocated_size)
6196 *allocated_size = le32_to_cpu(desc.data[1]);
6201 static void hclge_reset_umv_space(struct hclge_dev *hdev)
6203 struct hclge_vport *vport;
6206 for (i = 0; i < hdev->num_alloc_vport; i++) {
6207 vport = &hdev->vport[i];
6208 vport->used_umv_num = 0;
6211 mutex_lock(&hdev->umv_mutex);
6212 hdev->share_umv_size = hdev->priv_umv_size +
6213 hdev->max_umv_size % (hdev->num_req_vfs + 2);
6214 mutex_unlock(&hdev->umv_mutex);
6217 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
6219 struct hclge_dev *hdev = vport->back;
6222 mutex_lock(&hdev->umv_mutex);
6223 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
6224 hdev->share_umv_size == 0);
6225 mutex_unlock(&hdev->umv_mutex);
6230 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
6232 struct hclge_dev *hdev = vport->back;
6234 mutex_lock(&hdev->umv_mutex);
6236 if (vport->used_umv_num > hdev->priv_umv_size)
6237 hdev->share_umv_size++;
6239 if (vport->used_umv_num > 0)
6240 vport->used_umv_num--;
6242 if (vport->used_umv_num >= hdev->priv_umv_size &&
6243 hdev->share_umv_size > 0)
6244 hdev->share_umv_size--;
6245 vport->used_umv_num++;
6247 mutex_unlock(&hdev->umv_mutex);
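/* Unicast MAC (UMV) space accounting: a vport first consumes its private
 * quota (priv_umv_size) and only then dips into the shared pool
 * (share_umv_size); freeing reverses that order. All of it is protected
 * by umv_mutex.
 */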
6250 static int hclge_add_uc_addr(struct hnae3_handle *handle,
6251 const unsigned char *addr)
6253 struct hclge_vport *vport = hclge_get_vport(handle);
6255 return hclge_add_uc_addr_common(vport, addr);
6258 int hclge_add_uc_addr_common(struct hclge_vport *vport,
6259 const unsigned char *addr)
6261 struct hclge_dev *hdev = vport->back;
6262 struct hclge_mac_vlan_tbl_entry_cmd req;
6263 struct hclge_desc desc;
6264 u16 egress_port = 0;
6267 /* mac addr check */
6268 if (is_zero_ether_addr(addr) ||
6269 is_broadcast_ether_addr(addr) ||
6270 is_multicast_ether_addr(addr)) {
6271 dev_err(&hdev->pdev->dev,
6272 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
6274 is_zero_ether_addr(addr),
6275 is_broadcast_ether_addr(addr),
6276 is_multicast_ether_addr(addr));
6280 memset(&req, 0, sizeof(req));
6282 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
6283 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
6285 req.egress_port = cpu_to_le16(egress_port);
6287 hclge_prepare_mac_addr(&req, addr, false);
6289 /* Lookup the mac address in the mac_vlan table, and add
6290 * it if the entry does not exist. Duplicate unicast entries
6291 * are not allowed in the mac vlan table.
6293 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
6294 if (ret == -ENOENT) {
6295 if (!hclge_is_umv_space_full(vport)) {
6296 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
6298 hclge_update_umv_space(vport, false);
6302 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
6303 hdev->priv_umv_size);
6308 /* check if we just hit the duplicate */
6310 dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
6311 vport->vport_id, addr);
6315 dev_err(&hdev->pdev->dev,
6316 "PF failed to add unicast entry(%pM) in the MAC table\n",
6322 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
6323 const unsigned char *addr)
6325 struct hclge_vport *vport = hclge_get_vport(handle);
6327 return hclge_rm_uc_addr_common(vport, addr);
6330 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
6331 const unsigned char *addr)
6333 struct hclge_dev *hdev = vport->back;
6334 struct hclge_mac_vlan_tbl_entry_cmd req;
6337 /* mac addr check */
6338 if (is_zero_ether_addr(addr) ||
6339 is_broadcast_ether_addr(addr) ||
6340 is_multicast_ether_addr(addr)) {
6341 dev_dbg(&hdev->pdev->dev,
6342 "Remove mac err! invalid mac:%pM.\n",
6347 memset(&req, 0, sizeof(req));
6348 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6349 hclge_prepare_mac_addr(&req, addr, false);
6350 ret = hclge_remove_mac_vlan_tbl(vport, &req);
6352 hclge_update_umv_space(vport, true);
6357 static int hclge_add_mc_addr(struct hnae3_handle *handle,
6358 const unsigned char *addr)
6360 struct hclge_vport *vport = hclge_get_vport(handle);
6362 return hclge_add_mc_addr_common(vport, addr);
6365 int hclge_add_mc_addr_common(struct hclge_vport *vport,
6366 const unsigned char *addr)
6368 struct hclge_dev *hdev = vport->back;
6369 struct hclge_mac_vlan_tbl_entry_cmd req;
6370 struct hclge_desc desc[3];
6373 /* mac addr check */
6374 if (!is_multicast_ether_addr(addr)) {
6375 dev_err(&hdev->pdev->dev,
6376 "Add mc mac err! invalid mac:%pM.\n",
6380 memset(&req, 0, sizeof(req));
6381 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6382 hclge_prepare_mac_addr(&req, addr, true);
6383 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6385 /* This mac addr exists, update VFID for it */
6386 hclge_update_desc_vfid(desc, vport->vport_id, false);
6387 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6389 /* This mac addr does not exist, add a new entry for it */
6390 memset(desc[0].data, 0, sizeof(desc[0].data));
6391 memset(desc[1].data, 0, sizeof(desc[0].data));
6392 memset(desc[2].data, 0, sizeof(desc[0].data));
6393 hclge_update_desc_vfid(desc, vport->vport_id, false);
6394 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6397 if (status == -ENOSPC)
6398 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
6403 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
6404 const unsigned char *addr)
6406 struct hclge_vport *vport = hclge_get_vport(handle);
6408 return hclge_rm_mc_addr_common(vport, addr);
6411 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
6412 const unsigned char *addr)
6414 struct hclge_dev *hdev = vport->back;
6415 struct hclge_mac_vlan_tbl_entry_cmd req;
6416 enum hclge_cmd_status status;
6417 struct hclge_desc desc[3];
6419 /* mac addr check */
6420 if (!is_multicast_ether_addr(addr)) {
6421 dev_dbg(&hdev->pdev->dev,
6422 "Remove mc mac err! invalid mac:%pM.\n",
6427 memset(&req, 0, sizeof(req));
6428 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
6429 hclge_prepare_mac_addr(&req, addr, true);
6430 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
6432 /* This mac addr exists, remove this handle's VFID for it */
6433 hclge_update_desc_vfid(desc, vport->vport_id, true);
6435 if (hclge_is_all_function_id_zero(desc))
6436 /* All the vfids are zero, so we need to delete this entry */
6437 status = hclge_remove_mac_vlan_tbl(vport, &req);
6439 /* Not all the vfids are zero, update the vfid */
6440 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
6443 /* Maybe this mac address is in mta table, but it cannot be
6444 * deleted here because an entry of mta represents an address
6445 * range rather than a specific address. The delete action to
6446 * all entries will take effect in update_mta_status called by
6447 * hns3_nic_set_rx_mode.
6455 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6456 enum HCLGE_MAC_ADDR_TYPE mac_type)
6458 struct hclge_vport_mac_addr_cfg *mac_cfg;
6459 struct list_head *list;
6461 if (!vport->vport_id)
6464 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
6468 mac_cfg->hd_tbl_status = true;
6469 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
6471 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6472 &vport->uc_mac_list : &vport->mc_mac_list;
6474 list_add_tail(&mac_cfg->node, list);
6477 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
6479 enum HCLGE_MAC_ADDR_TYPE mac_type)
6481 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6482 struct list_head *list;
6483 bool uc_flag, mc_flag;
6485 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6486 &vport->uc_mac_list : &vport->mc_mac_list;
6488 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
6489 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
6491 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6492 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
6493 if (uc_flag && mac_cfg->hd_tbl_status)
6494 hclge_rm_uc_addr_common(vport, mac_addr);
6496 if (mc_flag && mac_cfg->hd_tbl_status)
6497 hclge_rm_mc_addr_common(vport, mac_addr);
6499 list_del(&mac_cfg->node);
6506 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
6507 enum HCLGE_MAC_ADDR_TYPE mac_type)
6509 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
6510 struct list_head *list;
6512 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
6513 &vport->uc_mac_list : &vport->mc_mac_list;
6515 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
6516 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
6517 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
6519 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
6520 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
6522 mac_cfg->hd_tbl_status = false;
6524 list_del(&mac_cfg->node);
6530 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
6532 struct hclge_vport_mac_addr_cfg *mac, *tmp;
6533 struct hclge_vport *vport;
6536 mutex_lock(&hdev->vport_cfg_mutex);
6537 for (i = 0; i < hdev->num_alloc_vport; i++) {
6538 vport = &hdev->vport[i];
6539 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
6540 list_del(&mac->node);
6544 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
6545 list_del(&mac->node);
6549 mutex_unlock(&hdev->vport_cfg_mutex);
6552 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
6553 u16 cmdq_resp, u8 resp_code)
6555 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
6556 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
6557 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
6558 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
6563 dev_err(&hdev->pdev->dev,
6564 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
6569 switch (resp_code) {
6570 case HCLGE_ETHERTYPE_SUCCESS_ADD:
6571 case HCLGE_ETHERTYPE_ALREADY_ADD:
6574 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
6575 dev_err(&hdev->pdev->dev,
6576 "add mac ethertype failed for manager table overflow.\n");
6577 return_status = -EIO;
6579 case HCLGE_ETHERTYPE_KEY_CONFLICT:
6580 dev_err(&hdev->pdev->dev,
6581 "add mac ethertype failed for key conflict.\n");
6582 return_status = -EIO;
6585 dev_err(&hdev->pdev->dev,
6586 "add mac ethertype failed for undefined, code=%d.\n",
6588 return_status = -EIO;
6591 return return_status;
6594 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
6595 const struct hclge_mac_mgr_tbl_entry_cmd *req)
6597 struct hclge_desc desc;
6602 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
6603 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
6605 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6607 dev_err(&hdev->pdev->dev,
6608 "add mac ethertype failed for cmd_send, ret =%d.\n",
6613 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
6614 retval = le16_to_cpu(desc.retval);
6616 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
6619 static int init_mgr_tbl(struct hclge_dev *hdev)
6624 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
6625 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
6627 dev_err(&hdev->pdev->dev,
6628 "add mac ethertype failed, ret =%d.\n",
6637 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
6639 struct hclge_vport *vport = hclge_get_vport(handle);
6640 struct hclge_dev *hdev = vport->back;
6642 ether_addr_copy(p, hdev->hw.mac.mac_addr);
6645 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
6648 const unsigned char *new_addr = (const unsigned char *)p;
6649 struct hclge_vport *vport = hclge_get_vport(handle);
6650 struct hclge_dev *hdev = vport->back;
6653 /* mac addr check */
6654 if (is_zero_ether_addr(new_addr) ||
6655 is_broadcast_ether_addr(new_addr) ||
6656 is_multicast_ether_addr(new_addr)) {
6657 dev_err(&hdev->pdev->dev,
6658 "Change uc mac err! invalid mac:%p.\n",
6663 if ((!is_first || is_kdump_kernel()) &&
6664 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
6665 dev_warn(&hdev->pdev->dev,
6666 "remove old uc mac address fail.\n");
6668 ret = hclge_add_uc_addr(handle, new_addr);
6670 dev_err(&hdev->pdev->dev,
6671 "add uc mac address fail, ret =%d.\n",
6675 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
6676 dev_err(&hdev->pdev->dev,
6677 "restore uc mac address fail.\n");
6682 ret = hclge_pause_addr_cfg(hdev, new_addr);
6684 dev_err(&hdev->pdev->dev,
6685 "configure mac pause address fail, ret =%d.\n",
6690 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
6695 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
6698 struct hclge_vport *vport = hclge_get_vport(handle);
6699 struct hclge_dev *hdev = vport->back;
6701 if (!hdev->hw.mac.phydev)
6704 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
6707 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
6708 u8 fe_type, bool filter_en, u8 vf_id)
6710 struct hclge_vlan_filter_ctrl_cmd *req;
6711 struct hclge_desc desc;
6714 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
6716 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
6717 req->vlan_type = vlan_type;
6718 req->vlan_fe = filter_en ? fe_type : 0;
6721 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6723 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
6729 #define HCLGE_FILTER_TYPE_VF 0
6730 #define HCLGE_FILTER_TYPE_PORT 1
6731 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
6732 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
6733 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
6734 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
6735 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
6736 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
6737 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
6738 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
6739 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
6741 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
6743 struct hclge_vport *vport = hclge_get_vport(handle);
6744 struct hclge_dev *hdev = vport->back;
6746 if (hdev->pdev->revision >= 0x21) {
6747 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6748 HCLGE_FILTER_FE_EGRESS, enable, 0);
6749 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
6750 HCLGE_FILTER_FE_INGRESS, enable, 0);
6752 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
6753 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
6757 handle->netdev_flags |= HNAE3_VLAN_FLTR;
6759 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
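/* Add or remove (is_kill) one entry in the VF VLAN filter table. The
 * target function is addressed by a bit in a 16-byte VF bitmap spread
 * over two descriptors, and the response code in req0 reports whether
 * the table overflowed or the entry was not found.
 */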
6762 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
6763 bool is_kill, u16 vlan, u8 qos,
6766 #define HCLGE_MAX_VF_BYTES 16
6767 struct hclge_vlan_filter_vf_cfg_cmd *req0;
6768 struct hclge_vlan_filter_vf_cfg_cmd *req1;
6769 struct hclge_desc desc[2];
6774 hclge_cmd_setup_basic_desc(&desc[0],
6775 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6776 hclge_cmd_setup_basic_desc(&desc[1],
6777 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
6779 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
6781 vf_byte_off = vfid / 8;
6782 vf_byte_val = 1 << (vfid % 8);
6784 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
6785 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
6787 req0->vlan_id = cpu_to_le16(vlan);
6788 req0->vlan_cfg = is_kill;
6790 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
6791 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
6793 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
6795 ret = hclge_cmd_send(&hdev->hw, desc, 2);
6797 dev_err(&hdev->pdev->dev,
6798 "Send vf vlan command fail, ret =%d.\n",
6804 #define HCLGE_VF_VLAN_NO_ENTRY 2
6805 if (!req0->resp_code || req0->resp_code == 1)
6808 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
6809 dev_warn(&hdev->pdev->dev,
6810 "vf vlan table is full, vf vlan filter is disabled\n");
6814 dev_err(&hdev->pdev->dev,
6815 "Add vf vlan filter fail, ret =%d.\n",
6818 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
6819 if (!req0->resp_code)
6822 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) {
6823 dev_warn(&hdev->pdev->dev,
6824 "vlan %d filter is not in vf vlan table\n",
6829 dev_err(&hdev->pdev->dev,
6830 "Kill vf vlan filter fail, ret =%d.\n",
6837 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
6838 u16 vlan_id, bool is_kill)
6840 struct hclge_vlan_filter_pf_cfg_cmd *req;
6841 struct hclge_desc desc;
6842 u8 vlan_offset_byte_val;
6843 u8 vlan_offset_byte;
6847 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
6849 vlan_offset_160 = vlan_id / 160;
6850 vlan_offset_byte = (vlan_id % 160) / 8;
6851 vlan_offset_byte_val = 1 << (vlan_id % 8);
6853 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
6854 req->vlan_offset = vlan_offset_160;
6855 req->vlan_cfg = is_kill;
6856 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
6858 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6860 dev_err(&hdev->pdev->dev,
6861 "port vlan command, send fail, ret =%d.\n", ret);
6865 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
6866 u16 vport_id, u16 vlan_id, u8 qos,
6869 u16 vport_idx, vport_num = 0;
6872 if (is_kill && !vlan_id)
6875 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
6878 dev_err(&hdev->pdev->dev,
6879 "Set %d vport vlan filter config fail, ret =%d.\n",
6884 /* vlan 0 may be added twice when 8021q module is enabled */
6885 if (!is_kill && !vlan_id &&
6886 test_bit(vport_id, hdev->vlan_table[vlan_id]))
6889 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
6890 dev_err(&hdev->pdev->dev,
6891 "Add port vlan failed, vport %d is already in vlan %d\n",
6897 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
6898 dev_err(&hdev->pdev->dev,
6899 "Delete port vlan failed, vport %d is not in vlan %d\n",
6904 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
6907 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
6908 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
6914 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
6916 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
6917 struct hclge_vport_vtag_tx_cfg_cmd *req;
6918 struct hclge_dev *hdev = vport->back;
6919 struct hclge_desc desc;
6922 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
6924 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
6925 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
6926 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
6927 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
6928 vcfg->accept_tag1 ? 1 : 0);
6929 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
6930 vcfg->accept_untag1 ? 1 : 0);
6931 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
6932 vcfg->accept_tag2 ? 1 : 0);
6933 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
6934 vcfg->accept_untag2 ? 1 : 0);
6935 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
6936 vcfg->insert_tag1_en ? 1 : 0);
6937 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
6938 vcfg->insert_tag2_en ? 1 : 0);
6939 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
6941 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6942 req->vf_bitmap[req->vf_offset] =
6943 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6945 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6947 dev_err(&hdev->pdev->dev,
6948 "Send port txvlan cfg command fail, ret =%d\n",
6954 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
6956 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
6957 struct hclge_vport_vtag_rx_cfg_cmd *req;
6958 struct hclge_dev *hdev = vport->back;
6959 struct hclge_desc desc;
6962 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
6964 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
6965 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
6966 vcfg->strip_tag1_en ? 1 : 0);
6967 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
6968 vcfg->strip_tag2_en ? 1 : 0);
6969 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
6970 vcfg->vlan1_vlan_prionly ? 1 : 0);
6971 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
6972 vcfg->vlan2_vlan_prionly ? 1 : 0);
6974 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
6975 req->vf_bitmap[req->vf_offset] =
6976 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
6978 status = hclge_cmd_send(&hdev->hw, &desc, 1);
6980 dev_err(&hdev->pdev->dev,
6981 "Send port rxvlan cfg command fail, ret =%d\n",
6987 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
6988 u16 port_base_vlan_state,
6993 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
6994 vport->txvlan_cfg.accept_tag1 = true;
6995 vport->txvlan_cfg.insert_tag1_en = false;
6996 vport->txvlan_cfg.default_tag1 = 0;
6998 vport->txvlan_cfg.accept_tag1 = false;
6999 vport->txvlan_cfg.insert_tag1_en = true;
7000 vport->txvlan_cfg.default_tag1 = vlan_tag;
7003 vport->txvlan_cfg.accept_untag1 = true;
7005 /* accept_tag2 and accept_untag2 are not supported on
7006 * pdev revision(0x20); newer revisions support them, but
7007 * these two fields cannot be configured by the user.
7008 */
7009 vport->txvlan_cfg.accept_tag2 = true;
7010 vport->txvlan_cfg.accept_untag2 = true;
7011 vport->txvlan_cfg.insert_tag2_en = false;
7012 vport->txvlan_cfg.default_tag2 = 0;
7014 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7015 vport->rxvlan_cfg.strip_tag1_en = false;
7016 vport->rxvlan_cfg.strip_tag2_en =
7017 vport->rxvlan_cfg.rx_vlan_offload_en;
7019 vport->rxvlan_cfg.strip_tag1_en =
7020 vport->rxvlan_cfg.rx_vlan_offload_en;
7021 vport->rxvlan_cfg.strip_tag2_en = true;
7023 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7024 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7026 ret = hclge_set_vlan_tx_offload_cfg(vport);
7030 return hclge_set_vlan_rx_offload_cfg(vport);
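/* Program the TPIDs the hardware uses to recognise outer/inner VLAN
 * tags on receive and to insert tags on transmit, taken from
 * hdev->vlan_type_cfg (0x8100 by default).
 */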
7033 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
7035 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
7036 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
7037 struct hclge_desc desc;
7040 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
7041 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
7042 rx_req->ot_fst_vlan_type =
7043 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
7044 rx_req->ot_sec_vlan_type =
7045 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
7046 rx_req->in_fst_vlan_type =
7047 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
7048 rx_req->in_sec_vlan_type =
7049 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
7051 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7053 dev_err(&hdev->pdev->dev,
7054 "Send rxvlan protocol type command fail, ret =%d\n",
7059 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
7061 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
7062 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
7063 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
7065 status = hclge_cmd_send(&hdev->hw, &desc, 1);
7067 dev_err(&hdev->pdev->dev,
7068 "Send txvlan protocol type command fail, ret =%d\n",
7074 static int hclge_init_vlan_config(struct hclge_dev *hdev)
7076 #define HCLGE_DEF_VLAN_TYPE 0x8100
7078 struct hnae3_handle *handle = &hdev->vport[0].nic;
7079 struct hclge_vport *vport;
7083 if (hdev->pdev->revision >= 0x21) {
7084 /* for revision 0x21, vf vlan filter is per function */
7085 for (i = 0; i < hdev->num_alloc_vport; i++) {
7086 vport = &hdev->vport[i];
7087 ret = hclge_set_vlan_filter_ctrl(hdev,
7088 HCLGE_FILTER_TYPE_VF,
7089 HCLGE_FILTER_FE_EGRESS,
7096 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7097 HCLGE_FILTER_FE_INGRESS, true,
7102 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7103 HCLGE_FILTER_FE_EGRESS_V1_B,
7109 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7111 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7112 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7113 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
7114 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
7115 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
7116 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
7118 ret = hclge_set_vlan_protocol_type(hdev);
7122 for (i = 0; i < hdev->num_alloc_vport; i++) {
7125 vport = &hdev->vport[i];
7126 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
7128 ret = hclge_vlan_offload_cfg(vport,
7129 vport->port_base_vlan_cfg.state,
7135 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
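/* Each vport keeps a software list of its VLANs so that entries can be
 * restored into the hardware filter later (see
 * hclge_add_vport_all_vlan_table()); hd_tbl_status records whether an
 * id is currently present in the hardware table.
 */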
7138 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7141 struct hclge_vport_vlan_cfg *vlan;
7143 /* vlan 0 is reserved */
7147 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
7151 vlan->hd_tbl_status = writen_to_tbl;
7152 vlan->vlan_id = vlan_id;
7154 list_add_tail(&vlan->node, &vport->vlan_list);
7157 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
7159 struct hclge_vport_vlan_cfg *vlan, *tmp;
7160 struct hclge_dev *hdev = vport->back;
7163 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7164 if (!vlan->hd_tbl_status) {
7165 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
7167 vlan->vlan_id, 0, false);
7169 dev_err(&hdev->pdev->dev,
7170 "restore vport vlan list failed, ret=%d\n",
7175 vlan->hd_tbl_status = true;
7181 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
7184 struct hclge_vport_vlan_cfg *vlan, *tmp;
7185 struct hclge_dev *hdev = vport->back;
7187 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7188 if (vlan->vlan_id == vlan_id) {
7189 if (is_write_tbl && vlan->hd_tbl_status)
7190 hclge_set_vlan_filter_hw(hdev,
7196 list_del(&vlan->node);
7203 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
7205 struct hclge_vport_vlan_cfg *vlan, *tmp;
7206 struct hclge_dev *hdev = vport->back;
7208 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7209 if (vlan->hd_tbl_status)
7210 hclge_set_vlan_filter_hw(hdev,
7216 vlan->hd_tbl_status = false;
7218 list_del(&vlan->node);
7224 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
7226 struct hclge_vport_vlan_cfg *vlan, *tmp;
7227 struct hclge_vport *vport;
7230 mutex_lock(&hdev->vport_cfg_mutex);
7231 for (i = 0; i < hdev->num_alloc_vport; i++) {
7232 vport = &hdev->vport[i];
7233 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
7234 list_del(&vlan->node);
7238 mutex_unlock(&hdev->vport_cfg_mutex);
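/* Toggle hardware RX VLAN tag stripping. Which strip bit follows
 * 'enable' depends on the port based VLAN state: with it disabled only
 * tag2 stripping is toggled, with it enabled tag2 is always stripped
 * and tag1 stripping is toggled.
 */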
7241 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
7243 struct hclge_vport *vport = hclge_get_vport(handle);
7245 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7246 vport->rxvlan_cfg.strip_tag1_en = false;
7247 vport->rxvlan_cfg.strip_tag2_en = enable;
7249 vport->rxvlan_cfg.strip_tag1_en = enable;
7250 vport->rxvlan_cfg.strip_tag2_en = true;
7252 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
7253 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
7254 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
7256 return hclge_set_vlan_rx_offload_cfg(vport);
7259 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
7260 u16 port_base_vlan_state,
7261 struct hclge_vlan_info *new_info,
7262 struct hclge_vlan_info *old_info)
7264 struct hclge_dev *hdev = vport->back;
7267 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
7268 hclge_rm_vport_all_vlan_table(vport, false);
7269 return hclge_set_vlan_filter_hw(hdev,
7270 htons(new_info->vlan_proto),
7273 new_info->qos, false);
7276 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
7277 vport->vport_id, old_info->vlan_tag,
7278 old_info->qos, true);
7282 return hclge_add_vport_all_vlan_table(vport);
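/* Apply a new port based VLAN configuration to a vport: reprogram the
 * TX/RX tag offload settings, swap the VLAN filter entries in hardware
 * and update the cached state and vlan_info in port_base_vlan_cfg.
 */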
7285 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
7286 struct hclge_vlan_info *vlan_info)
7288 struct hnae3_handle *nic = &vport->nic;
7289 struct hclge_vlan_info *old_vlan_info;
7290 struct hclge_dev *hdev = vport->back;
7293 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
7295 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
7299 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
7300 /* add new VLAN tag */
7301 ret = hclge_set_vlan_filter_hw(hdev,
7302 htons(vlan_info->vlan_proto),
7304 vlan_info->vlan_tag,
7305 vlan_info->qos, false);
7309 /* remove old VLAN tag */
7310 ret = hclge_set_vlan_filter_hw(hdev,
7311 htons(old_vlan_info->vlan_proto),
7313 old_vlan_info->vlan_tag,
7314 old_vlan_info->qos, true);
7321 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
7326 /* update state only when disabling/enabling port based VLAN */
7327 vport->port_base_vlan_cfg.state = state;
7328 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
7329 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
7331 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
7334 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
7335 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
7336 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
7341 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
7342 enum hnae3_port_base_vlan_state state,
7345 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7347 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7349 return HNAE3_PORT_BASE_VLAN_ENABLE;
7352 return HNAE3_PORT_BASE_VLAN_DISABLE;
7353 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
7354 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
7356 return HNAE3_PORT_BASE_VLAN_MODIFY;
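/* Handler behind the .set_vf_vlan_filter ae_ops hook; only supported
 * from pdev revision 0x21 on. The request is first classified against
 * the current port based VLAN state, then applied to the PF (with the
 * client stopped around the change) or propagated to the VF's port
 * based VLAN configuration.
 */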
7360 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
7361 u16 vlan, u8 qos, __be16 proto)
7363 struct hclge_vport *vport = hclge_get_vport(handle);
7364 struct hclge_dev *hdev = vport->back;
7365 struct hclge_vlan_info vlan_info;
7369 if (hdev->pdev->revision == 0x20)
7372 /* qos is a 3-bit value, so it cannot be bigger than 7 */
7373 if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
7375 if (proto != htons(ETH_P_8021Q))
7376 return -EPROTONOSUPPORT;
7378 vport = &hdev->vport[vfid];
7379 state = hclge_get_port_base_vlan_state(vport,
7380 vport->port_base_vlan_cfg.state,
7382 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
7385 vlan_info.vlan_tag = vlan;
7386 vlan_info.qos = qos;
7387 vlan_info.vlan_proto = ntohs(proto);
7389 /* update port based VLAN for PF */
7391 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7392 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
7393 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7398 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
7399 return hclge_update_port_base_vlan_cfg(vport, state,
7402 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
7410 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
7411 u16 vlan_id, bool is_kill)
7413 struct hclge_vport *vport = hclge_get_vport(handle);
7414 struct hclge_dev *hdev = vport->back;
7415 bool writen_to_tbl = false;
7418 /* When port based VLAN is enabled, we use the port based VLAN as the
7419 * VLAN filter entry. In this case, we don't update the VLAN filter table
7420 * when the user adds a new VLAN or removes an existing one; we just
7421 * update the vport VLAN list. The VLAN ids in the VLAN list won't be
7422 * written into the VLAN filter table until port based VLAN is disabled.
7423 */
7424 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
7425 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
7426 vlan_id, 0, is_kill);
7427 writen_to_tbl = true;
7434 hclge_rm_vport_vlan_table(vport, vlan_id, false);
7436 hclge_add_vport_vlan_table(vport, vlan_id,
7442 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
7444 struct hclge_config_max_frm_size_cmd *req;
7445 struct hclge_desc desc;
7447 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
7449 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
7450 req->max_frm_size = cpu_to_le16(new_mps);
7451 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
7453 return hclge_cmd_send(&hdev->hw, &desc, 1);
7456 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
7458 struct hclge_vport *vport = hclge_get_vport(handle);
7460 return hclge_set_vport_mtu(vport, new_mtu);
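/* Convert the requested MTU into a maximum frame size (MTU + Ethernet
 * header + FCS + two VLAN tags). A VF's MPS may not exceed the PF's,
 * and the PF's MPS may not drop below any VF's; changing the PF MPS
 * also re-allocates the packet buffers.
 */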
7463 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
7465 struct hclge_dev *hdev = vport->back;
7466 int i, max_frm_size, ret = 0;
7468 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
7469 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
7470 max_frm_size > HCLGE_MAC_MAX_FRAME)
7473 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
7474 mutex_lock(&hdev->vport_lock);
7475 /* VF's mps must fit within hdev->mps */
7476 if (vport->vport_id && max_frm_size > hdev->mps) {
7477 mutex_unlock(&hdev->vport_lock);
7479 } else if (vport->vport_id) {
7480 vport->mps = max_frm_size;
7481 mutex_unlock(&hdev->vport_lock);
7485 /* PF's mps must be no smaller than any VF's mps */
7486 for (i = 1; i < hdev->num_alloc_vport; i++)
7487 if (max_frm_size < hdev->vport[i].mps) {
7488 mutex_unlock(&hdev->vport_lock);
7492 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
7494 ret = hclge_set_mac_mtu(hdev, max_frm_size);
7496 dev_err(&hdev->pdev->dev,
7497 "Change mtu fail, ret =%d\n", ret);
7501 hdev->mps = max_frm_size;
7502 vport->mps = max_frm_size;
7504 ret = hclge_buffer_alloc(hdev);
7506 dev_err(&hdev->pdev->dev,
7507 "Allocate buffer fail, ret =%d\n", ret);
7510 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
7511 mutex_unlock(&hdev->vport_lock);
7515 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
7518 struct hclge_reset_tqp_queue_cmd *req;
7519 struct hclge_desc desc;
7522 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
7524 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7525 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7526 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
7528 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7530 dev_err(&hdev->pdev->dev,
7531 "Send tqp reset cmd error, status =%d\n", ret);
7538 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
7540 struct hclge_reset_tqp_queue_cmd *req;
7541 struct hclge_desc desc;
7544 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
7546 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
7547 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
7549 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7551 dev_err(&hdev->pdev->dev,
7552 "Get reset status error, status =%d\n", ret);
7556 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
7559 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
7561 struct hnae3_queue *queue;
7562 struct hclge_tqp *tqp;
7564 queue = handle->kinfo.tqp[queue_id];
7565 tqp = container_of(queue, struct hclge_tqp, q);
7567 return tqp->index;
7570 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
7572 struct hclge_vport *vport = hclge_get_vport(handle);
7573 struct hclge_dev *hdev = vport->back;
7574 int reset_try_times = 0;
7579 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
7581 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
7583 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
7587 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7589 dev_err(&hdev->pdev->dev,
7590 "Send reset tqp cmd fail, ret = %d\n", ret);
7594 reset_try_times = 0;
7595 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7596 /* Wait for tqp hw reset */
7598 reset_status = hclge_get_reset_status(hdev, queue_gid);
7603 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7604 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
7608 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7610 dev_err(&hdev->pdev->dev,
7611 "Deassert the soft reset fail, ret = %d\n", ret);
7616 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
7618 struct hclge_dev *hdev = vport->back;
7619 int reset_try_times = 0;
7624 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
7626 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
7628 dev_warn(&hdev->pdev->dev,
7629 "Send reset tqp cmd fail, ret = %d\n", ret);
7633 reset_try_times = 0;
7634 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
7635 /* Wait for tqp hw reset */
7637 reset_status = hclge_get_reset_status(hdev, queue_gid);
7642 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
7643 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
7647 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
7649 dev_warn(&hdev->pdev->dev,
7650 "Deassert the soft reset fail, ret = %d\n", ret);
7653 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
7655 struct hclge_vport *vport = hclge_get_vport(handle);
7656 struct hclge_dev *hdev = vport->back;
7658 return hdev->fw_version;
7661 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7663 struct phy_device *phydev = hdev->hw.mac.phydev;
7668 phy_set_asym_pause(phydev, rx_en, tx_en);
7671 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
7676 hdev->fc_mode_last_time = HCLGE_FC_FULL;
7677 else if (rx_en && !tx_en)
7678 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
7679 else if (!rx_en && tx_en)
7680 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
7682 hdev->fc_mode_last_time = HCLGE_FC_NONE;
7684 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
7687 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
7689 dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n",
7694 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
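/* Resolve MAC pause settings from the PHY autonegotiation result:
 * combine local and link partner pause advertisements with
 * mii_resolve_flowctrl_fdx() and program the outcome (pause is
 * disabled for half duplex links).
 */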
7699 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
7701 struct phy_device *phydev = hdev->hw.mac.phydev;
7702 u16 remote_advertising = 0;
7703 u16 local_advertising = 0;
7704 u32 rx_pause, tx_pause;
7707 if (!phydev->link || !phydev->autoneg)
7710 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
7713 remote_advertising = LPA_PAUSE_CAP;
7715 if (phydev->asym_pause)
7716 remote_advertising |= LPA_PAUSE_ASYM;
7718 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
7719 remote_advertising);
7720 tx_pause = flowctl & FLOW_CTRL_TX;
7721 rx_pause = flowctl & FLOW_CTRL_RX;
7723 if (phydev->duplex == HCLGE_MAC_HALF) {
7728 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
7731 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
7732 u32 *rx_en, u32 *tx_en)
7734 struct hclge_vport *vport = hclge_get_vport(handle);
7735 struct hclge_dev *hdev = vport->back;
7737 *auto_neg = hclge_get_autoneg(handle);
7739 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7745 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
7748 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
7751 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
7760 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
7761 u32 rx_en, u32 tx_en)
7763 struct hclge_vport *vport = hclge_get_vport(handle);
7764 struct hclge_dev *hdev = vport->back;
7765 struct phy_device *phydev = hdev->hw.mac.phydev;
7768 fc_autoneg = hclge_get_autoneg(handle);
7769 if (auto_neg != fc_autoneg) {
7770 dev_info(&hdev->pdev->dev,
7771 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
7775 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
7776 dev_info(&hdev->pdev->dev,
7777 "Priority flow control enabled. Cannot set link flow control.\n");
7781 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
7784 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
7787 return phy_start_aneg(phydev);
7789 if (hdev->pdev->revision == 0x20)
7792 return hclge_restart_autoneg(handle);
7795 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
7796 u8 *auto_neg, u32 *speed, u8 *duplex)
7798 struct hclge_vport *vport = hclge_get_vport(handle);
7799 struct hclge_dev *hdev = vport->back;
7802 *speed = hdev->hw.mac.speed;
7804 *duplex = hdev->hw.mac.duplex;
7806 *auto_neg = hdev->hw.mac.autoneg;
7809 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
7812 struct hclge_vport *vport = hclge_get_vport(handle);
7813 struct hclge_dev *hdev = vport->back;
7816 *media_type = hdev->hw.mac.media_type;
7819 *module_type = hdev->hw.mac.module_type;
7822 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
7823 u8 *tp_mdix_ctrl, u8 *tp_mdix)
7825 struct hclge_vport *vport = hclge_get_vport(handle);
7826 struct hclge_dev *hdev = vport->back;
7827 struct phy_device *phydev = hdev->hw.mac.phydev;
7828 int mdix_ctrl, mdix, retval, is_resolved;
7831 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7832 *tp_mdix = ETH_TP_MDI_INVALID;
7836 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
7838 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
7839 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
7840 HCLGE_PHY_MDIX_CTRL_S);
7842 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
7843 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
7844 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
7846 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
7848 switch (mdix_ctrl) {
7850 *tp_mdix_ctrl = ETH_TP_MDI;
7853 *tp_mdix_ctrl = ETH_TP_MDI_X;
7856 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
7859 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
7864 *tp_mdix = ETH_TP_MDI_INVALID;
7866 *tp_mdix = ETH_TP_MDI_X;
7868 *tp_mdix = ETH_TP_MDI;
7871 static void hclge_info_show(struct hclge_dev *hdev)
7873 struct device *dev = &hdev->pdev->dev;
7875 dev_info(dev, "PF info begin:\n");
7877 dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
7878 dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
7879 dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
7880 dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
7881 dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
7882 dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
7883 dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
7884 dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
7885 dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
7886 dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
7887 dev_info(dev, "This is %s PF\n",
7888 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
7889 dev_info(dev, "DCB %s\n",
7890 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
7891 dev_info(dev, "MQPRIO %s\n",
7892 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
7894 dev_info(dev, "PF info end.\n");
7897 static int hclge_init_client_instance(struct hnae3_client *client,
7898 struct hnae3_ae_dev *ae_dev)
7900 struct hclge_dev *hdev = ae_dev->priv;
7901 struct hclge_vport *vport;
7904 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7905 vport = &hdev->vport[i];
7907 switch (client->type) {
7908 case HNAE3_CLIENT_KNIC:
7910 hdev->nic_client = client;
7911 vport->nic.client = client;
7912 ret = client->ops->init_instance(&vport->nic);
7916 hnae3_set_client_init_flag(client, ae_dev, 1);
7918 if (netif_msg_drv(&hdev->vport->nic))
7919 hclge_info_show(hdev);
7921 if (hdev->roce_client &&
7922 hnae3_dev_roce_supported(hdev)) {
7923 struct hnae3_client *rc = hdev->roce_client;
7925 ret = hclge_init_roce_base_info(vport);
7929 ret = rc->ops->init_instance(&vport->roce);
7933 hnae3_set_client_init_flag(hdev->roce_client,
7938 case HNAE3_CLIENT_UNIC:
7939 hdev->nic_client = client;
7940 vport->nic.client = client;
7942 ret = client->ops->init_instance(&vport->nic);
7946 hnae3_set_client_init_flag(client, ae_dev, 1);
7949 case HNAE3_CLIENT_ROCE:
7950 if (hnae3_dev_roce_supported(hdev)) {
7951 hdev->roce_client = client;
7952 vport->roce.client = client;
7955 if (hdev->roce_client && hdev->nic_client) {
7956 ret = hclge_init_roce_base_info(vport);
7960 ret = client->ops->init_instance(&vport->roce);
7964 hnae3_set_client_init_flag(client, ae_dev, 1);
7976 hdev->nic_client = NULL;
7977 vport->nic.client = NULL;
7980 hdev->roce_client = NULL;
7981 vport->roce.client = NULL;
7985 static void hclge_uninit_client_instance(struct hnae3_client *client,
7986 struct hnae3_ae_dev *ae_dev)
7988 struct hclge_dev *hdev = ae_dev->priv;
7989 struct hclge_vport *vport;
7992 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
7993 vport = &hdev->vport[i];
7994 if (hdev->roce_client) {
7995 hdev->roce_client->ops->uninit_instance(&vport->roce,
7997 hdev->roce_client = NULL;
7998 vport->roce.client = NULL;
8000 if (client->type == HNAE3_CLIENT_ROCE)
8002 if (hdev->nic_client && client->ops->uninit_instance) {
8003 client->ops->uninit_instance(&vport->nic, 0);
8004 hdev->nic_client = NULL;
8005 vport->nic.client = NULL;
8010 static int hclge_pci_init(struct hclge_dev *hdev)
8012 struct pci_dev *pdev = hdev->pdev;
8013 struct hclge_hw *hw;
8016 ret = pci_enable_device(pdev);
8018 dev_err(&pdev->dev, "failed to enable PCI device\n");
8022 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
8024 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
8027 "can't set consistent PCI DMA");
8028 goto err_disable_device;
8030 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
8033 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
8035 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
8036 goto err_disable_device;
8039 pci_set_master(pdev);
8041 hw->io_base = pcim_iomap(pdev, 2, 0);
8043 dev_err(&pdev->dev, "Can't map configuration register space\n");
8045 goto err_clr_master;
8048 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
8052 pci_clear_master(pdev);
8053 pci_release_regions(pdev);
8055 pci_disable_device(pdev);
8060 static void hclge_pci_uninit(struct hclge_dev *hdev)
8062 struct pci_dev *pdev = hdev->pdev;
8064 pcim_iounmap(pdev, hdev->hw.io_base);
8065 pci_free_irq_vectors(pdev);
8066 pci_clear_master(pdev);
8067 pci_release_mem_regions(pdev);
8068 pci_disable_device(pdev);
8071 static void hclge_state_init(struct hclge_dev *hdev)
8073 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
8074 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8075 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
8076 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
8077 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
8078 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
8081 static void hclge_state_uninit(struct hclge_dev *hdev)
8083 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8085 if (hdev->service_timer.function)
8086 del_timer_sync(&hdev->service_timer);
8087 if (hdev->reset_timer.function)
8088 del_timer_sync(&hdev->reset_timer);
8089 if (hdev->service_task.func)
8090 cancel_work_sync(&hdev->service_task);
8091 if (hdev->rst_service_task.func)
8092 cancel_work_sync(&hdev->rst_service_task);
8093 if (hdev->mbx_service_task.func)
8094 cancel_work_sync(&hdev->mbx_service_task);
8097 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
8099 #define HCLGE_FLR_WAIT_MS 100
8100 #define HCLGE_FLR_WAIT_CNT 50
8101 struct hclge_dev *hdev = ae_dev->priv;
8104 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
8105 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
8106 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
8107 hclge_reset_event(hdev->pdev, NULL);
8109 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
8110 cnt++ < HCLGE_FLR_WAIT_CNT)
8111 msleep(HCLGE_FLR_WAIT_MS);
8113 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
8114 dev_err(&hdev->pdev->dev,
8115 "flr wait down timeout: %d\n", cnt);
8118 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
8120 struct hclge_dev *hdev = ae_dev->priv;
8122 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
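/* Main probe path for the PF ae_dev: bring up PCI and the firmware
 * command queue, query capabilities, set up MSI and the misc vector,
 * allocate TQPs and vports, then initialize MDIO, UMV space, MAC,
 * VLAN, TM scheduling, RSS, the manager and flow director tables, HW
 * error interrupts and the service/reset/mailbox tasks.
 */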
8125 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
8127 struct pci_dev *pdev = ae_dev->pdev;
8128 struct hclge_dev *hdev;
8131 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
8138 hdev->ae_dev = ae_dev;
8139 hdev->reset_type = HNAE3_NONE_RESET;
8140 hdev->reset_level = HNAE3_FUNC_RESET;
8141 ae_dev->priv = hdev;
8142 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8144 mutex_init(&hdev->vport_lock);
8145 mutex_init(&hdev->vport_cfg_mutex);
8147 ret = hclge_pci_init(hdev);
8149 dev_err(&pdev->dev, "PCI init failed\n");
8153 /* Firmware command queue initialize */
8154 ret = hclge_cmd_queue_init(hdev);
8156 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
8157 goto err_pci_uninit;
8160 /* Firmware command initialize */
8161 ret = hclge_cmd_init(hdev);
8163 goto err_cmd_uninit;
8165 ret = hclge_get_cap(hdev);
8167 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
8169 goto err_cmd_uninit;
8172 ret = hclge_configure(hdev);
8174 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
8175 goto err_cmd_uninit;
8178 ret = hclge_init_msi(hdev);
8180 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
8181 goto err_cmd_uninit;
8184 ret = hclge_misc_irq_init(hdev);
8187 "Misc IRQ(vector0) init error, ret = %d.\n",
8189 goto err_msi_uninit;
8192 ret = hclge_alloc_tqps(hdev);
8194 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
8195 goto err_msi_irq_uninit;
8198 ret = hclge_alloc_vport(hdev);
8200 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
8201 goto err_msi_irq_uninit;
8204 ret = hclge_map_tqp(hdev);
8206 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8207 goto err_msi_irq_uninit;
8210 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
8211 ret = hclge_mac_mdio_config(hdev);
8213 dev_err(&hdev->pdev->dev,
8214 "mdio config fail ret=%d\n", ret);
8215 goto err_msi_irq_uninit;
8219 ret = hclge_init_umv_space(hdev);
8221 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
8222 goto err_mdiobus_unreg;
8225 ret = hclge_mac_init(hdev);
8227 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8228 goto err_mdiobus_unreg;
8231 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8233 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8234 goto err_mdiobus_unreg;
8237 ret = hclge_config_gro(hdev, true);
8239 goto err_mdiobus_unreg;
8241 ret = hclge_init_vlan_config(hdev);
8243 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8244 goto err_mdiobus_unreg;
8247 ret = hclge_tm_schd_init(hdev);
8249 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
8250 goto err_mdiobus_unreg;
8253 hclge_rss_init_cfg(hdev);
8254 ret = hclge_rss_init_hw(hdev);
8256 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8257 goto err_mdiobus_unreg;
8260 ret = init_mgr_tbl(hdev);
8262 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
8263 goto err_mdiobus_unreg;
8266 ret = hclge_init_fd_config(hdev);
8269 "fd table init fail, ret=%d\n", ret);
8270 goto err_mdiobus_unreg;
8273 ret = hclge_hw_error_set_state(hdev, true);
8276 "fail(%d) to enable hw error interrupts\n", ret);
8277 goto err_mdiobus_unreg;
8280 INIT_KFIFO(hdev->mac_tnl_log);
8282 hclge_dcb_ops_set(hdev);
8284 timer_setup(&hdev->service_timer, hclge_service_timer, 0);
8285 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
8286 INIT_WORK(&hdev->service_task, hclge_service_task);
8287 INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
8288 INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
8290 hclge_clear_all_event_cause(hdev);
8292 /* Enable MISC vector(vector0) */
8293 hclge_enable_vector(&hdev->misc_vector, true);
8295 hclge_state_init(hdev);
8296 hdev->last_reset_time = jiffies;
8298 pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
8302 if (hdev->hw.mac.phydev)
8303 mdiobus_unregister(hdev->hw.mac.mdio_bus);
8305 hclge_misc_irq_uninit(hdev);
8307 pci_free_irq_vectors(pdev);
8309 hclge_cmd_uninit(hdev);
8311 pcim_iounmap(pdev, hdev->hw.io_base);
8312 pci_clear_master(pdev);
8313 pci_release_regions(pdev);
8314 pci_disable_device(pdev);
8319 static void hclge_stats_clear(struct hclge_dev *hdev)
8321 memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
8324 static void hclge_reset_vport_state(struct hclge_dev *hdev)
8326 struct hclge_vport *vport = hdev->vport;
8329 for (i = 0; i < hdev->num_alloc_vport; i++) {
8330 hclge_vport_stop(vport);
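/* Re-initialization path used when the ae_dev is reset: resources
 * allocated at probe time (vports, TQPs, interrupt vectors) are kept
 * and only the hardware state (MAC, VLAN, TM, RSS, flow director,
 * error interrupts) is reconfigured.
 */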
8335 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
8337 struct hclge_dev *hdev = ae_dev->priv;
8338 struct pci_dev *pdev = ae_dev->pdev;
8341 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8343 hclge_stats_clear(hdev);
8344 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
8346 ret = hclge_cmd_init(hdev);
8348 dev_err(&pdev->dev, "Cmd queue init failed\n");
8352 ret = hclge_map_tqp(hdev);
8354 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
8358 hclge_reset_umv_space(hdev);
8360 ret = hclge_mac_init(hdev);
8362 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
8366 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
8368 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
8372 ret = hclge_config_gro(hdev, true);
8376 ret = hclge_init_vlan_config(hdev);
8378 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
8382 ret = hclge_tm_init_hw(hdev, true);
8384 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
8388 ret = hclge_rss_init_hw(hdev);
8390 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
8394 ret = hclge_init_fd_config(hdev);
8397 "fd table init fail, ret=%d\n", ret);
8401 /* Re-enable the hw error interrupts because
8402 * the interrupts get disabled on core/global reset.
8404 ret = hclge_hw_error_set_state(hdev, true);
8407 "fail(%d) to re-enable HNS hw error interrupts\n", ret);
8411 hclge_reset_vport_state(hdev);
8413 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
8419 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
8421 struct hclge_dev *hdev = ae_dev->priv;
8422 struct hclge_mac *mac = &hdev->hw.mac;
8424 hclge_state_uninit(hdev);
8427 mdiobus_unregister(mac->mdio_bus);
8429 hclge_uninit_umv_space(hdev);
8431 /* Disable MISC vector(vector0) */
8432 hclge_enable_vector(&hdev->misc_vector, false);
8433 synchronize_irq(hdev->misc_vector.vector_irq);
8435 hclge_config_mac_tnl_int(hdev, false);
8436 hclge_hw_error_set_state(hdev, false);
8437 hclge_cmd_uninit(hdev);
8438 hclge_misc_irq_uninit(hdev);
8439 hclge_pci_uninit(hdev);
8440 mutex_destroy(&hdev->vport_lock);
8441 hclge_uninit_vport_mac_table(hdev);
8442 hclge_uninit_vport_vlan_table(hdev);
8443 mutex_destroy(&hdev->vport_cfg_mutex);
8444 ae_dev->priv = NULL;
8447 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
8449 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8450 struct hclge_vport *vport = hclge_get_vport(handle);
8451 struct hclge_dev *hdev = vport->back;
8453 return min_t(u32, hdev->rss_size_max,
8454 vport->alloc_tqps / kinfo->num_tc);
8457 static void hclge_get_channels(struct hnae3_handle *handle,
8458 struct ethtool_channels *ch)
8460 ch->max_combined = hclge_get_max_channels(handle);
8461 ch->other_count = 1;
8463 ch->combined_count = handle->kinfo.rss_size;
8466 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
8467 u16 *alloc_tqps, u16 *max_rss_size)
8469 struct hclge_vport *vport = hclge_get_vport(handle);
8470 struct hclge_dev *hdev = vport->back;
8472 *alloc_tqps = vport->alloc_tqps;
8473 *max_rss_size = hdev->rss_size_max;
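/* Change the number of combined channels: record the requested RSS
 * size, let the TM code remap the vport's TQPs, rewrite the RSS TC
 * mode and, unless the user configured the RSS indirection table
 * explicitly, rebuild it for the new queue count.
 */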
8476 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
8477 bool rxfh_configured)
8479 struct hclge_vport *vport = hclge_get_vport(handle);
8480 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
8481 struct hclge_dev *hdev = vport->back;
8482 int cur_rss_size = kinfo->rss_size;
8483 int cur_tqps = kinfo->num_tqps;
8484 u16 tc_offset[HCLGE_MAX_TC_NUM];
8485 u16 tc_valid[HCLGE_MAX_TC_NUM];
8486 u16 tc_size[HCLGE_MAX_TC_NUM];
8491 kinfo->req_rss_size = new_tqps_num;
8493 ret = hclge_tm_vport_map_update(hdev);
8495 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
8499 roundup_size = roundup_pow_of_two(kinfo->rss_size);
8500 roundup_size = ilog2(roundup_size);
8501 /* Set the RSS TC mode according to the new RSS size */
8502 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
8505 if (!(hdev->hw_tc_map & BIT(i)))
8509 tc_size[i] = roundup_size;
8510 tc_offset[i] = kinfo->rss_size * i;
8512 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
8516 /* RSS indirection table has been configured by the user */
8517 if (rxfh_configured)
8520 /* Reinitialize the RSS indirection table according to the new RSS size */
8521 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
8525 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
8526 rss_indir[i] = i % kinfo->rss_size;
8528 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
8530 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
8537 dev_info(&hdev->pdev->dev,
8538 "Channels changed, rss_size from %d to %d, tqps from %d to %d",
8539 cur_rss_size, kinfo->rss_size,
8540 cur_tqps, kinfo->rss_size * kinfo->num_tc);
8545 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
8546 u32 *regs_num_64_bit)
8548 struct hclge_desc desc;
8552 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
8553 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8555 dev_err(&hdev->pdev->dev,
8556 "Query register number cmd failed, ret = %d.\n", ret);
8560 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
8561 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
8563 total_num = *regs_num_32_bit + *regs_num_64_bit;
8570 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8573 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
8575 struct hclge_desc *desc;
8576 u32 *reg_val = data;
8585 cmd_num = DIV_ROUND_UP(regs_num + 2, HCLGE_32_BIT_REG_RTN_DATANUM);
8586 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8590 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
8591 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8593 dev_err(&hdev->pdev->dev,
8594 "Query 32 bit register cmd failed, ret = %d.\n", ret);
8599 for (i = 0; i < cmd_num; i++) {
8601 desc_data = (__le32 *)(&desc[i].data[0]);
8602 n = HCLGE_32_BIT_REG_RTN_DATANUM - 2;
8604 desc_data = (__le32 *)(&desc[i]);
8605 n = HCLGE_32_BIT_REG_RTN_DATANUM;
8607 for (k = 0; k < n; k++) {
8608 *reg_val++ = le32_to_cpu(*desc_data++);
8620 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
8623 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
8625 struct hclge_desc *desc;
8626 u64 *reg_val = data;
8635 cmd_num = DIV_ROUND_UP(regs_num + 1, HCLGE_64_BIT_REG_RTN_DATANUM);
8636 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
8640 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
8641 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
8643 dev_err(&hdev->pdev->dev,
8644 "Query 64 bit register cmd failed, ret = %d.\n", ret);
8649 for (i = 0; i < cmd_num; i++) {
8651 desc_data = (__le64 *)(&desc[i].data[0]);
8652 n = HCLGE_64_BIT_REG_RTN_DATANUM - 1;
8654 desc_data = (__le64 *)(&desc[i]);
8655 n = HCLGE_64_BIT_REG_RTN_DATANUM;
8657 for (k = 0; k < n; k++) {
8658 *reg_val++ = le64_to_cpu(*desc_data++);
8670 #define MAX_SEPARATE_NUM 4
8671 #define SEPARATOR_VALUE 0xFFFFFFFF
8672 #define REG_NUM_PER_LINE 4
8673 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
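/* The register dump layout: the cmdq/common/per-ring/per-vector blocks
 * read directly from PCIe space, each padded with 0xFFFFFFFF separator
 * words to a multiple of four u32s, followed by the 32-bit and 64-bit
 * register sets queried from firmware.
 */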
8675 static int hclge_get_regs_len(struct hnae3_handle *handle)
8677 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
8678 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8679 struct hclge_vport *vport = hclge_get_vport(handle);
8680 struct hclge_dev *hdev = vport->back;
8681 u32 regs_num_32_bit, regs_num_64_bit;
8684 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8686 dev_err(&hdev->pdev->dev,
8687 "Get register number failed, ret = %d.\n", ret);
8691 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
8692 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
8693 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
8694 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
8696 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
8697 tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
8698 regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
8701 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
8704 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
8705 struct hclge_vport *vport = hclge_get_vport(handle);
8706 struct hclge_dev *hdev = vport->back;
8707 u32 regs_num_32_bit, regs_num_64_bit;
8708 int i, j, reg_um, separator_num;
8712 *version = hdev->fw_version;
8714 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
8716 dev_err(&hdev->pdev->dev,
8717 "Get register number failed, ret = %d.\n", ret);
8721 /* fetching per-PF register values from PF PCIe register space */
8722 reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
8723 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8724 for (i = 0; i < reg_um; i++)
8725 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
8726 for (i = 0; i < separator_num; i++)
8727 *reg++ = SEPARATOR_VALUE;
8729 reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
8730 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8731 for (i = 0; i < reg_um; i++)
8732 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
8733 for (i = 0; i < separator_num; i++)
8734 *reg++ = SEPARATOR_VALUE;
8736 reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
8737 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8738 for (j = 0; j < kinfo->num_tqps; j++) {
8739 for (i = 0; i < reg_um; i++)
8740 *reg++ = hclge_read_dev(&hdev->hw,
8741 ring_reg_addr_list[i] +
8743 for (i = 0; i < separator_num; i++)
8744 *reg++ = SEPARATOR_VALUE;
8747 reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
8748 separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
8749 for (j = 0; j < hdev->num_msi_used - 1; j++) {
8750 for (i = 0; i < reg_um; i++)
8751 *reg++ = hclge_read_dev(&hdev->hw,
8752 tqp_intr_reg_addr_list[i] +
8754 for (i = 0; i < separator_num; i++)
8755 *reg++ = SEPARATOR_VALUE;
8758 /* fetching PF common registers values from firmware */
8759 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
8761 dev_err(&hdev->pdev->dev,
8762 "Get 32 bit register failed, ret = %d.\n", ret);
8766 reg += regs_num_32_bit;
8767 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
8769 dev_err(&hdev->pdev->dev,
8770 "Get 64 bit register failed, ret = %d.\n", ret);
8773 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
8775 struct hclge_set_led_state_cmd *req;
8776 struct hclge_desc desc;
8779 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
8781 req = (struct hclge_set_led_state_cmd *)desc.data;
8782 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
8783 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
8785 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8787 dev_err(&hdev->pdev->dev,
8788 "Send set led state cmd error, ret =%d\n", ret);
8793 enum hclge_led_status {
8796 HCLGE_LED_NO_CHANGE = 0xFF,
8799 static int hclge_set_led_id(struct hnae3_handle *handle,
8800 enum ethtool_phys_id_state status)
8802 struct hclge_vport *vport = hclge_get_vport(handle);
8803 struct hclge_dev *hdev = vport->back;
8806 case ETHTOOL_ID_ACTIVE:
8807 return hclge_set_led_status(hdev, HCLGE_LED_ON);
8808 case ETHTOOL_ID_INACTIVE:
8809 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
8815 static void hclge_get_link_mode(struct hnae3_handle *handle,
8816 unsigned long *supported,
8817 unsigned long *advertising)
8819 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
8820 struct hclge_vport *vport = hclge_get_vport(handle);
8821 struct hclge_dev *hdev = vport->back;
8822 unsigned int idx = 0;
8824 for (; idx < size; idx++) {
8825 supported[idx] = hdev->hw.mac.supported[idx];
8826 advertising[idx] = hdev->hw.mac.advertising[idx];
8830 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
8832 struct hclge_vport *vport = hclge_get_vport(handle);
8833 struct hclge_dev *hdev = vport->back;
8835 return hclge_config_gro(hdev, enable);
8838 static const struct hnae3_ae_ops hclge_ops = {
8839 .init_ae_dev = hclge_init_ae_dev,
8840 .uninit_ae_dev = hclge_uninit_ae_dev,
8841 .flr_prepare = hclge_flr_prepare,
8842 .flr_done = hclge_flr_done,
8843 .init_client_instance = hclge_init_client_instance,
8844 .uninit_client_instance = hclge_uninit_client_instance,
8845 .map_ring_to_vector = hclge_map_ring_to_vector,
8846 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
8847 .get_vector = hclge_get_vector,
8848 .put_vector = hclge_put_vector,
8849 .set_promisc_mode = hclge_set_promisc_mode,
8850 .set_loopback = hclge_set_loopback,
8851 .start = hclge_ae_start,
8852 .stop = hclge_ae_stop,
8853 .client_start = hclge_client_start,
8854 .client_stop = hclge_client_stop,
8855 .get_status = hclge_get_status,
8856 .get_ksettings_an_result = hclge_get_ksettings_an_result,
8857 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
8858 .get_media_type = hclge_get_media_type,
8859 .check_port_speed = hclge_check_port_speed,
8860 .get_fec = hclge_get_fec,
8861 .set_fec = hclge_set_fec,
8862 .get_rss_key_size = hclge_get_rss_key_size,
8863 .get_rss_indir_size = hclge_get_rss_indir_size,
8864 .get_rss = hclge_get_rss,
8865 .set_rss = hclge_set_rss,
8866 .set_rss_tuple = hclge_set_rss_tuple,
8867 .get_rss_tuple = hclge_get_rss_tuple,
8868 .get_tc_size = hclge_get_tc_size,
8869 .get_mac_addr = hclge_get_mac_addr,
8870 .set_mac_addr = hclge_set_mac_addr,
8871 .do_ioctl = hclge_do_ioctl,
8872 .add_uc_addr = hclge_add_uc_addr,
8873 .rm_uc_addr = hclge_rm_uc_addr,
8874 .add_mc_addr = hclge_add_mc_addr,
8875 .rm_mc_addr = hclge_rm_mc_addr,
8876 .set_autoneg = hclge_set_autoneg,
8877 .get_autoneg = hclge_get_autoneg,
8878 .restart_autoneg = hclge_restart_autoneg,
8879 .get_pauseparam = hclge_get_pauseparam,
8880 .set_pauseparam = hclge_set_pauseparam,
8881 .set_mtu = hclge_set_mtu,
8882 .reset_queue = hclge_reset_tqp,
8883 .get_stats = hclge_get_stats,
8884 .get_mac_pause_stats = hclge_get_mac_pause_stat,
8885 .update_stats = hclge_update_stats,
8886 .get_strings = hclge_get_strings,
8887 .get_sset_count = hclge_get_sset_count,
8888 .get_fw_version = hclge_get_fw_version,
8889 .get_mdix_mode = hclge_get_mdix_mode,
8890 .enable_vlan_filter = hclge_enable_vlan_filter,
8891 .set_vlan_filter = hclge_set_vlan_filter,
8892 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
8893 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
8894 .reset_event = hclge_reset_event,
8895 .set_default_reset_request = hclge_set_def_reset_request,
8896 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
8897 .set_channels = hclge_set_channels,
8898 .get_channels = hclge_get_channels,
8899 .get_regs_len = hclge_get_regs_len,
8900 .get_regs = hclge_get_regs,
8901 .set_led_id = hclge_set_led_id,
8902 .get_link_mode = hclge_get_link_mode,
8903 .add_fd_entry = hclge_add_fd_entry,
8904 .del_fd_entry = hclge_del_fd_entry,
8905 .del_all_fd_entries = hclge_del_all_fd_entries,
8906 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
8907 .get_fd_rule_info = hclge_get_fd_rule_info,
8908 .get_fd_all_rules = hclge_get_all_rules,
8909 .restore_fd_rules = hclge_restore_fd_entries,
8910 .enable_fd = hclge_enable_fd,
8911 .dbg_run_cmd = hclge_dbg_run_cmd,
8912 .handle_hw_ras_error = hclge_handle_hw_ras_error,
8913 .get_hw_reset_stat = hclge_get_hw_reset_stat,
8914 .ae_dev_resetting = hclge_ae_dev_resetting,
8915 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
8916 .set_gro_en = hclge_gro_en,
8917 .get_global_queue_id = hclge_covert_handle_qid_global,
8918 .set_timer_task = hclge_set_timer_task,
8919 .mac_connect_phy = hclge_mac_connect_phy,
8920 .mac_disconnect_phy = hclge_mac_disconnect_phy,
8923 static struct hnae3_ae_algo ae_algo = {
8925 .pdev_id_table = ae_algo_pci_tbl,
8928 static int hclge_init(void)
8930 pr_info("%s is initializing\n", HCLGE_NAME);
8932 hnae3_register_ae_algo(&ae_algo);
8937 static void hclge_exit(void)
8939 hnae3_unregister_ae_algo(&ae_algo);
8941 module_init(hclge_init);
8942 module_exit(hclge_exit);
8944 MODULE_LICENSE("GPL");
8945 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
8946 MODULE_DESCRIPTION("HCLGE Driver");
8947 MODULE_VERSION(HCLGE_MOD_VERSION);