1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/acpi.h>
5 #include <linux/device.h>
6 #include <linux/etherdevice.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/platform_device.h>
14 #include <linux/if_vlan.h>
15 #include <linux/crash_dump.h>
16 #include <net/rtnetlink.h>
17 #include "hclge_cmd.h"
18 #include "hclge_dcb.h"
19 #include "hclge_main.h"
20 #include "hclge_mbx.h"
21 #include "hclge_mdio.h"
23 #include "hclge_err.h"
26 #define HCLGE_NAME "hclge"
27 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
28 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
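/* Illustrative usage sketch (not from the original source): each entry of
 * g_mac_stats_string further down pairs an ethtool string with a byte offset
 * into struct hclge_mac_stats, so any counter can be fetched generically:
 *
 *	u64 pause = HCLGE_STATS_READ(&hdev->mac_stats,
 *			HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num));
 *
 * which is how hclge_comm_get_stats() walks the whole table.
 */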
30 #define HCLGE_BUF_SIZE_UNIT 256U
31 #define HCLGE_BUF_MUL_BY 2
32 #define HCLGE_BUF_DIV_BY 2
33 #define NEED_RESERVE_TC_NUM 2
34 #define BUF_MAX_PERCENT 100
35 #define BUF_RESERVE_PERCENT 90
37 #define HCLGE_RESET_MAX_FAIL_CNT 5
38 #define HCLGE_RESET_SYNC_TIME 100
39 #define HCLGE_PF_RESET_SYNC_TIME 20
40 #define HCLGE_PF_RESET_SYNC_CNT 1500
42 /* Get DFX BD number offset */
43 #define HCLGE_DFX_BIOS_BD_OFFSET 1
44 #define HCLGE_DFX_SSU_0_BD_OFFSET 2
45 #define HCLGE_DFX_SSU_1_BD_OFFSET 3
46 #define HCLGE_DFX_IGU_BD_OFFSET 4
47 #define HCLGE_DFX_RPU_0_BD_OFFSET 5
48 #define HCLGE_DFX_RPU_1_BD_OFFSET 6
49 #define HCLGE_DFX_NCSI_BD_OFFSET 7
50 #define HCLGE_DFX_RTC_BD_OFFSET 8
51 #define HCLGE_DFX_PPP_BD_OFFSET 9
52 #define HCLGE_DFX_RCB_BD_OFFSET 10
53 #define HCLGE_DFX_TQP_BD_OFFSET 11
54 #define HCLGE_DFX_SSU_2_BD_OFFSET 12
56 #define HCLGE_LINK_STATUS_MS 10
58 #define HCLGE_VF_VPORT_START_NUM 1
60 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
61 static int hclge_init_vlan_config(struct hclge_dev *hdev);
62 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
63 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
64 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
65 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
66 u16 *allocated_size, bool is_alloc);
67 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
68 static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
69 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
71 static int hclge_set_default_loopback(struct hclge_dev *hdev);
73 static struct hnae3_ae_algo ae_algo;
75 static struct workqueue_struct *hclge_wq;
77 static const struct pci_device_id ae_algo_pci_tbl[] = {
78 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
79 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
80 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
81 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
82 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
83 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
84 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
85 /* required last entry */
89 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
91 static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
92 HCLGE_CMDQ_TX_ADDR_H_REG,
93 HCLGE_CMDQ_TX_DEPTH_REG,
94 HCLGE_CMDQ_TX_TAIL_REG,
95 HCLGE_CMDQ_TX_HEAD_REG,
96 HCLGE_CMDQ_RX_ADDR_L_REG,
97 HCLGE_CMDQ_RX_ADDR_H_REG,
98 HCLGE_CMDQ_RX_DEPTH_REG,
99 HCLGE_CMDQ_RX_TAIL_REG,
100 HCLGE_CMDQ_RX_HEAD_REG,
101 HCLGE_VECTOR0_CMDQ_SRC_REG,
102 HCLGE_CMDQ_INTR_STS_REG,
103 HCLGE_CMDQ_INTR_EN_REG,
104 HCLGE_CMDQ_INTR_GEN_REG};
106 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
107 HCLGE_VECTOR0_OTER_EN_REG,
108 HCLGE_MISC_RESET_STS_REG,
109 HCLGE_MISC_VECTOR_INT_STS,
110 HCLGE_GLOBAL_RESET_REG,
114 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
115 HCLGE_RING_RX_ADDR_H_REG,
116 HCLGE_RING_RX_BD_NUM_REG,
117 HCLGE_RING_RX_BD_LENGTH_REG,
118 HCLGE_RING_RX_MERGE_EN_REG,
119 HCLGE_RING_RX_TAIL_REG,
120 HCLGE_RING_RX_HEAD_REG,
121 HCLGE_RING_RX_FBD_NUM_REG,
122 HCLGE_RING_RX_OFFSET_REG,
123 HCLGE_RING_RX_FBD_OFFSET_REG,
124 HCLGE_RING_RX_STASH_REG,
125 HCLGE_RING_RX_BD_ERR_REG,
126 HCLGE_RING_TX_ADDR_L_REG,
127 HCLGE_RING_TX_ADDR_H_REG,
128 HCLGE_RING_TX_BD_NUM_REG,
129 HCLGE_RING_TX_PRIORITY_REG,
130 HCLGE_RING_TX_TC_REG,
131 HCLGE_RING_TX_MERGE_EN_REG,
132 HCLGE_RING_TX_TAIL_REG,
133 HCLGE_RING_TX_HEAD_REG,
134 HCLGE_RING_TX_FBD_NUM_REG,
135 HCLGE_RING_TX_OFFSET_REG,
136 HCLGE_RING_TX_EBD_NUM_REG,
137 HCLGE_RING_TX_EBD_OFFSET_REG,
138 HCLGE_RING_TX_BD_ERR_REG,
141 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
142 HCLGE_TQP_INTR_GL0_REG,
143 HCLGE_TQP_INTR_GL1_REG,
144 HCLGE_TQP_INTR_GL2_REG,
145 HCLGE_TQP_INTR_RL_REG};
147 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
149 "Serdes serial Loopback test",
150 "Serdes parallel Loopback test",
154 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
155 {"mac_tx_mac_pause_num",
156 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
157 {"mac_rx_mac_pause_num",
158 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
159 {"mac_tx_control_pkt_num",
160 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
161 {"mac_rx_control_pkt_num",
162 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
163 {"mac_tx_pfc_pkt_num",
164 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
165 {"mac_tx_pfc_pri0_pkt_num",
166 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
167 {"mac_tx_pfc_pri1_pkt_num",
168 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
169 {"mac_tx_pfc_pri2_pkt_num",
170 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
171 {"mac_tx_pfc_pri3_pkt_num",
172 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
173 {"mac_tx_pfc_pri4_pkt_num",
174 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
175 {"mac_tx_pfc_pri5_pkt_num",
176 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
177 {"mac_tx_pfc_pri6_pkt_num",
178 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
179 {"mac_tx_pfc_pri7_pkt_num",
180 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
181 {"mac_rx_pfc_pkt_num",
182 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
183 {"mac_rx_pfc_pri0_pkt_num",
184 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
185 {"mac_rx_pfc_pri1_pkt_num",
186 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
187 {"mac_rx_pfc_pri2_pkt_num",
188 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
189 {"mac_rx_pfc_pri3_pkt_num",
190 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
191 {"mac_rx_pfc_pri4_pkt_num",
192 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
193 {"mac_rx_pfc_pri5_pkt_num",
194 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
195 {"mac_rx_pfc_pri6_pkt_num",
196 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
197 {"mac_rx_pfc_pri7_pkt_num",
198 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
199 {"mac_tx_total_pkt_num",
200 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
201 {"mac_tx_total_oct_num",
202 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
203 {"mac_tx_good_pkt_num",
204 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
205 {"mac_tx_bad_pkt_num",
206 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
207 {"mac_tx_good_oct_num",
208 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
209 {"mac_tx_bad_oct_num",
210 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
211 {"mac_tx_uni_pkt_num",
212 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
213 {"mac_tx_multi_pkt_num",
214 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
215 {"mac_tx_broad_pkt_num",
216 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
217 {"mac_tx_undersize_pkt_num",
218 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
219 {"mac_tx_oversize_pkt_num",
220 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
221 {"mac_tx_64_oct_pkt_num",
222 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
223 {"mac_tx_65_127_oct_pkt_num",
224 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
225 {"mac_tx_128_255_oct_pkt_num",
226 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
227 {"mac_tx_256_511_oct_pkt_num",
228 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
229 {"mac_tx_512_1023_oct_pkt_num",
230 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
231 {"mac_tx_1024_1518_oct_pkt_num",
232 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
233 {"mac_tx_1519_2047_oct_pkt_num",
234 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
235 {"mac_tx_2048_4095_oct_pkt_num",
236 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
237 {"mac_tx_4096_8191_oct_pkt_num",
238 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
239 {"mac_tx_8192_9216_oct_pkt_num",
240 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
241 {"mac_tx_9217_12287_oct_pkt_num",
242 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
243 {"mac_tx_12288_16383_oct_pkt_num",
244 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
245 {"mac_tx_1519_max_good_pkt_num",
246 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
247 {"mac_tx_1519_max_bad_pkt_num",
248 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
249 {"mac_rx_total_pkt_num",
250 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
251 {"mac_rx_total_oct_num",
252 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
253 {"mac_rx_good_pkt_num",
254 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
255 {"mac_rx_bad_pkt_num",
256 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
257 {"mac_rx_good_oct_num",
258 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
259 {"mac_rx_bad_oct_num",
260 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
261 {"mac_rx_uni_pkt_num",
262 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
263 {"mac_rx_multi_pkt_num",
264 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
265 {"mac_rx_broad_pkt_num",
266 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
267 {"mac_rx_undersize_pkt_num",
268 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
269 {"mac_rx_oversize_pkt_num",
270 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
271 {"mac_rx_64_oct_pkt_num",
272 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
273 {"mac_rx_65_127_oct_pkt_num",
274 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
275 {"mac_rx_128_255_oct_pkt_num",
276 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
277 {"mac_rx_256_511_oct_pkt_num",
278 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
279 {"mac_rx_512_1023_oct_pkt_num",
280 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
281 {"mac_rx_1024_1518_oct_pkt_num",
282 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
283 {"mac_rx_1519_2047_oct_pkt_num",
284 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
285 {"mac_rx_2048_4095_oct_pkt_num",
286 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
287 {"mac_rx_4096_8191_oct_pkt_num",
288 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
289 {"mac_rx_8192_9216_oct_pkt_num",
290 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
291 {"mac_rx_9217_12287_oct_pkt_num",
292 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
293 {"mac_rx_12288_16383_oct_pkt_num",
294 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
295 {"mac_rx_1519_max_good_pkt_num",
296 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
297 {"mac_rx_1519_max_bad_pkt_num",
298 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
300 {"mac_tx_fragment_pkt_num",
301 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
302 {"mac_tx_undermin_pkt_num",
303 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
304 {"mac_tx_jabber_pkt_num",
305 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
306 {"mac_tx_err_all_pkt_num",
307 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
308 {"mac_tx_from_app_good_pkt_num",
309 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
310 {"mac_tx_from_app_bad_pkt_num",
311 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
312 {"mac_rx_fragment_pkt_num",
313 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
314 {"mac_rx_undermin_pkt_num",
315 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
316 {"mac_rx_jabber_pkt_num",
317 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
318 {"mac_rx_fcs_err_pkt_num",
319 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
320 {"mac_rx_send_app_good_pkt_num",
321 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
322 {"mac_rx_send_app_bad_pkt_num",
323 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
326 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
328 .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
329 .ethter_type = cpu_to_le16(ETH_P_LLDP),
330 .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
331 .i_port_bitmap = 0x1,
335 static const u8 hclge_hash_key[] = {
336 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
337 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
338 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
339 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
340 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
343 static const u32 hclge_dfx_bd_offset_list[] = {
344 HCLGE_DFX_BIOS_BD_OFFSET,
345 HCLGE_DFX_SSU_0_BD_OFFSET,
346 HCLGE_DFX_SSU_1_BD_OFFSET,
347 HCLGE_DFX_IGU_BD_OFFSET,
348 HCLGE_DFX_RPU_0_BD_OFFSET,
349 HCLGE_DFX_RPU_1_BD_OFFSET,
350 HCLGE_DFX_NCSI_BD_OFFSET,
351 HCLGE_DFX_RTC_BD_OFFSET,
352 HCLGE_DFX_PPP_BD_OFFSET,
353 HCLGE_DFX_RCB_BD_OFFSET,
354 HCLGE_DFX_TQP_BD_OFFSET,
355 HCLGE_DFX_SSU_2_BD_OFFSET
358 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
359 HCLGE_OPC_DFX_BIOS_COMMON_REG,
360 HCLGE_OPC_DFX_SSU_REG_0,
361 HCLGE_OPC_DFX_SSU_REG_1,
362 HCLGE_OPC_DFX_IGU_EGU_REG,
363 HCLGE_OPC_DFX_RPU_REG_0,
364 HCLGE_OPC_DFX_RPU_REG_1,
365 HCLGE_OPC_DFX_NCSI_REG,
366 HCLGE_OPC_DFX_RTC_REG,
367 HCLGE_OPC_DFX_PPP_REG,
368 HCLGE_OPC_DFX_RCB_REG,
369 HCLGE_OPC_DFX_TQP_REG,
370 HCLGE_OPC_DFX_SSU_REG_2
373 static const struct key_info meta_data_key_info[] = {
374 { PACKET_TYPE_ID, 6},
384 static const struct key_info tuple_key_info[] = {
385 { OUTER_DST_MAC, 48},
386 { OUTER_SRC_MAC, 48},
387 { OUTER_VLAN_TAG_FST, 16},
388 { OUTER_VLAN_TAG_SEC, 16},
389 { OUTER_ETH_TYPE, 16},
392 { OUTER_IP_PROTO, 8},
396 { OUTER_SRC_PORT, 16},
397 { OUTER_DST_PORT, 16},
399 { OUTER_TUN_VNI, 24},
400 { OUTER_TUN_FLOW_ID, 8},
401 { INNER_DST_MAC, 48},
402 { INNER_SRC_MAC, 48},
403 { INNER_VLAN_TAG_FST, 16},
404 { INNER_VLAN_TAG_SEC, 16},
405 { INNER_ETH_TYPE, 16},
408 { INNER_IP_PROTO, 8},
412 { INNER_SRC_PORT, 16},
413 { INNER_DST_PORT, 16},
417 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
419 #define HCLGE_MAC_CMD_NUM 21
421 u64 *data = (u64 *)(&hdev->mac_stats);
422 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
427 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
428 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
430 dev_err(&hdev->pdev->dev,
431 "Get MAC pkt stats fail, status = %d.\n", ret);
436 for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
437 /* for special opcode 0032, only the first desc has the head */
438 if (unlikely(i == 0)) {
439 desc_data = (__le64 *)(&desc[i].data[0]);
440 n = HCLGE_RD_FIRST_STATS_NUM;
442 desc_data = (__le64 *)(&desc[i]);
443 n = HCLGE_RD_OTHER_STATS_NUM;
446 for (k = 0; k < n; k++) {
447 *data += le64_to_cpu(*desc_data);
456 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
458 u64 *data = (u64 *)(&hdev->mac_stats);
459 struct hclge_desc *desc;
464 /* This may be called inside atomic sections,
465 * so GFP_ATOMIC is more suitable here
467 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
471 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
472 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
478 for (i = 0; i < desc_num; i++) {
479 /* for special opcode 0034, only the first desc has the head */
481 desc_data = (__le64 *)(&desc[i].data[0]);
482 n = HCLGE_RD_FIRST_STATS_NUM;
484 desc_data = (__le64 *)(&desc[i]);
485 n = HCLGE_RD_OTHER_STATS_NUM;
488 for (k = 0; k < n; k++) {
489 *data += le64_to_cpu(*desc_data);
500 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
502 struct hclge_desc desc;
507 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
508 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
512 desc_data = (__le32 *)(&desc.data[0]);
513 reg_num = le32_to_cpu(*desc_data);
515 *desc_num = 1 + ((reg_num - 3) >> 2) +
516 (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
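/* Reading the formula above: the first descriptor accounts for 3 of the
 * reg_num statistics registers and every further descriptor for up to 4,
 * so desc_num = 1 + DIV_ROUND_UP(reg_num - 3, 4). Worked example with an
 * illustrative value: reg_num = 27 gives 1 + ((27 - 3) >> 2) + 0 = 7.
 */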
521 static int hclge_mac_update_stats(struct hclge_dev *hdev)
526 ret = hclge_mac_query_reg_num(hdev, &desc_num);
528 /* The firmware supports the new statistics acquisition method */
530 ret = hclge_mac_update_stats_complete(hdev, desc_num);
531 else if (ret == -EOPNOTSUPP)
532 ret = hclge_mac_update_stats_defective(hdev);
534 dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
539 static int hclge_tqps_update_stats(struct hnae3_handle *handle)
541 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
542 struct hclge_vport *vport = hclge_get_vport(handle);
543 struct hclge_dev *hdev = vport->back;
544 struct hnae3_queue *queue;
545 struct hclge_desc desc[1];
546 struct hclge_tqp *tqp;
549 for (i = 0; i < kinfo->num_tqps; i++) {
550 queue = handle->kinfo.tqp[i];
551 tqp = container_of(queue, struct hclge_tqp, q);
552 /* command : HCLGE_OPC_QUERY_RX_STATUS */
553 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATUS,
556 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
557 ret = hclge_cmd_send(&hdev->hw, desc, 1);
559 dev_err(&hdev->pdev->dev,
560 "Query tqp stat fail, status = %d,queue = %d\n",
564 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
565 le32_to_cpu(desc[0].data[1]);
568 for (i = 0; i < kinfo->num_tqps; i++) {
569 queue = handle->kinfo.tqp[i];
570 tqp = container_of(queue, struct hclge_tqp, q);
571 /* command : HCLGE_OPC_QUERY_TX_STATUS */
572 hclge_cmd_setup_basic_desc(&desc[0],
573 HCLGE_OPC_QUERY_TX_STATUS,
576 desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
577 ret = hclge_cmd_send(&hdev->hw, desc, 1);
579 dev_err(&hdev->pdev->dev,
580 "Query tqp stat fail, status = %d,queue = %d\n",
584 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
585 le32_to_cpu(desc[0].data[1]);
591 static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
593 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
594 struct hclge_tqp *tqp;
598 for (i = 0; i < kinfo->num_tqps; i++) {
599 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
600 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
603 for (i = 0; i < kinfo->num_tqps; i++) {
604 tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
605 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
611 static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
613 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
615 /* each tqp has two queues: TX and RX */
616 return kinfo->num_tqps * (2);
619 static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
621 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
625 for (i = 0; i < kinfo->num_tqps; i++) {
626 struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
627 struct hclge_tqp, q);
628 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
630 buff = buff + ETH_GSTRING_LEN;
633 for (i = 0; i < kinfo->num_tqps; i++) {
634 struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
635 struct hclge_tqp, q);
636 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
638 buff = buff + ETH_GSTRING_LEN;
644 static u64 *hclge_comm_get_stats(const void *comm_stats,
645 const struct hclge_comm_stats_str strs[],
651 for (i = 0; i < size; i++)
652 buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
657 static u8 *hclge_comm_get_strings(u32 stringset,
658 const struct hclge_comm_stats_str strs[],
661 char *buff = (char *)data;
664 if (stringset != ETH_SS_STATS)
667 for (i = 0; i < size; i++) {
668 snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
669 buff = buff + ETH_GSTRING_LEN;
675 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
677 struct hnae3_handle *handle;
680 handle = &hdev->vport[0].nic;
681 if (handle->client) {
682 status = hclge_tqps_update_stats(handle);
684 dev_err(&hdev->pdev->dev,
685 "Update TQPS stats fail, status = %d.\n",
690 status = hclge_mac_update_stats(hdev);
692 dev_err(&hdev->pdev->dev,
693 "Update MAC stats fail, status = %d.\n", status);
696 static void hclge_update_stats(struct hnae3_handle *handle,
697 struct net_device_stats *net_stats)
699 struct hclge_vport *vport = hclge_get_vport(handle);
700 struct hclge_dev *hdev = vport->back;
703 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
706 status = hclge_mac_update_stats(hdev);
708 dev_err(&hdev->pdev->dev,
709 "Update MAC stats fail, status = %d.\n",
712 status = hclge_tqps_update_stats(handle);
714 dev_err(&hdev->pdev->dev,
715 "Update TQPS stats fail, status = %d.\n",
718 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
721 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
723 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
724 HNAE3_SUPPORT_PHY_LOOPBACK |\
725 HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
726 HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
728 struct hclge_vport *vport = hclge_get_vport(handle);
729 struct hclge_dev *hdev = vport->back;
732 /* Loopback test support rules:
733 * mac: only supported in GE mode
734 * serdes: supported by all MAC modes, including GE/XGE/LGE/CGE
735 * phy: only supported when a PHY device exists on the board
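/* Illustrative outcome of these rules (example values): on a revision >= 0x21
 * device with no PHY attached, the checks below set APP, serdes serial and
 * serdes parallel loopback in handle->flags, while PHY loopback stays clear.
 */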
737 if (stringset == ETH_SS_TEST) {
738 /* clear loopback bit flags at first */
739 handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
740 if (hdev->pdev->revision >= 0x21 ||
741 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
742 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
743 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
745 handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
749 handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
750 handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
752 if (hdev->hw.mac.phydev) {
754 handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
757 } else if (stringset == ETH_SS_STATS) {
758 count = ARRAY_SIZE(g_mac_stats_string) +
759 hclge_tqps_get_sset_count(handle, stringset);
765 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
768 u8 *p = (char *)data;
771 if (stringset == ETH_SS_STATS) {
772 size = ARRAY_SIZE(g_mac_stats_string);
773 p = hclge_comm_get_strings(stringset, g_mac_stats_string,
775 p = hclge_tqps_get_strings(handle, p);
776 } else if (stringset == ETH_SS_TEST) {
777 if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
778 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
780 p += ETH_GSTRING_LEN;
782 if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
783 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
785 p += ETH_GSTRING_LEN;
787 if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
789 hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
791 p += ETH_GSTRING_LEN;
793 if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
794 memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
796 p += ETH_GSTRING_LEN;
801 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
803 struct hclge_vport *vport = hclge_get_vport(handle);
804 struct hclge_dev *hdev = vport->back;
807 p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
808 ARRAY_SIZE(g_mac_stats_string), data);
809 p = hclge_tqps_get_stats(handle, p);
812 static void hclge_get_mac_stat(struct hnae3_handle *handle,
813 struct hns3_mac_stats *mac_stats)
815 struct hclge_vport *vport = hclge_get_vport(handle);
816 struct hclge_dev *hdev = vport->back;
818 hclge_update_stats(handle, NULL);
820 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
821 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
824 static int hclge_parse_func_status(struct hclge_dev *hdev,
825 struct hclge_func_status_cmd *status)
827 if (!(status->pf_state & HCLGE_PF_STATE_DONE))
830 /* Record whether this PF is the main PF */
831 if (status->pf_state & HCLGE_PF_STATE_MAIN)
832 hdev->flag |= HCLGE_FLAG_MAIN;
834 hdev->flag &= ~HCLGE_FLAG_MAIN;
839 static int hclge_query_function_status(struct hclge_dev *hdev)
841 #define HCLGE_QUERY_MAX_CNT 5
843 struct hclge_func_status_cmd *req;
844 struct hclge_desc desc;
848 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
849 req = (struct hclge_func_status_cmd *)desc.data;
852 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
854 dev_err(&hdev->pdev->dev,
855 "query function status failed %d.\n", ret);
859 /* Check if the PF reset is done */
862 usleep_range(1000, 2000);
863 } while (timeout++ < HCLGE_QUERY_MAX_CNT);
865 ret = hclge_parse_func_status(hdev, req);
870 static int hclge_query_pf_resource(struct hclge_dev *hdev)
872 struct hclge_pf_res_cmd *req;
873 struct hclge_desc desc;
876 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
877 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
879 dev_err(&hdev->pdev->dev,
880 "query pf resource failed %d.\n", ret);
884 req = (struct hclge_pf_res_cmd *)desc.data;
885 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
886 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
888 if (req->tx_buf_size)
890 __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
892 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
894 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
896 if (req->dv_buf_size)
898 __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
900 hdev->dv_buf_size = HCLGE_DEFAULT_DV;
902 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
904 if (hnae3_dev_roce_supported(hdev)) {
905 hdev->roce_base_msix_offset =
906 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
907 HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
909 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
910 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
912 /* the NIC's MSI-X vector count always equals the RoCE's. */
913 hdev->num_nic_msi = hdev->num_roce_msi;
915 /* The PF has both NIC and RoCE vectors;
916 * the NIC vectors are allocated before the RoCE vectors.
918 hdev->num_msi = hdev->num_roce_msi +
919 hdev->roce_base_msix_offset;
922 hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
923 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
925 hdev->num_nic_msi = hdev->num_msi;
928 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
929 dev_err(&hdev->pdev->dev,
930 "Just %u msi resources, not enough for pf(min:2).\n",
938 static int hclge_parse_speed(int speed_cmd, int *speed)
942 *speed = HCLGE_MAC_SPEED_10M;
945 *speed = HCLGE_MAC_SPEED_100M;
948 *speed = HCLGE_MAC_SPEED_1G;
951 *speed = HCLGE_MAC_SPEED_10G;
954 *speed = HCLGE_MAC_SPEED_25G;
957 *speed = HCLGE_MAC_SPEED_40G;
960 *speed = HCLGE_MAC_SPEED_50G;
963 *speed = HCLGE_MAC_SPEED_100G;
972 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
974 struct hclge_vport *vport = hclge_get_vport(handle);
975 struct hclge_dev *hdev = vport->back;
976 u32 speed_ability = hdev->hw.mac.speed_ability;
980 case HCLGE_MAC_SPEED_10M:
981 speed_bit = HCLGE_SUPPORT_10M_BIT;
983 case HCLGE_MAC_SPEED_100M:
984 speed_bit = HCLGE_SUPPORT_100M_BIT;
986 case HCLGE_MAC_SPEED_1G:
987 speed_bit = HCLGE_SUPPORT_1G_BIT;
989 case HCLGE_MAC_SPEED_10G:
990 speed_bit = HCLGE_SUPPORT_10G_BIT;
992 case HCLGE_MAC_SPEED_25G:
993 speed_bit = HCLGE_SUPPORT_25G_BIT;
995 case HCLGE_MAC_SPEED_40G:
996 speed_bit = HCLGE_SUPPORT_40G_BIT;
998 case HCLGE_MAC_SPEED_50G:
999 speed_bit = HCLGE_SUPPORT_50G_BIT;
1001 case HCLGE_MAC_SPEED_100G:
1002 speed_bit = HCLGE_SUPPORT_100G_BIT;
1008 if (speed_bit & speed_ability)
1014 static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
1016 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1017 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1019 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1020 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1022 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1023 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1025 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1026 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1028 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1029 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1033 static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
1035 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1036 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1038 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1039 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1041 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1042 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1044 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1045 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1047 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1048 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1052 static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
1054 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1055 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1057 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1058 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1060 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1061 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1063 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1064 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1066 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1067 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1071 static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
1073 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1074 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1076 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1077 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1079 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1080 linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1082 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1083 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1085 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1086 linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1088 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1089 linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1093 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1095 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1096 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1098 switch (mac->speed) {
1099 case HCLGE_MAC_SPEED_10G:
1100 case HCLGE_MAC_SPEED_40G:
1101 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1104 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1106 case HCLGE_MAC_SPEED_25G:
1107 case HCLGE_MAC_SPEED_50G:
1108 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1111 BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1112 BIT(HNAE3_FEC_AUTO);
1114 case HCLGE_MAC_SPEED_100G:
1115 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1116 mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1119 mac->fec_ability = 0;
1124 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1127 struct hclge_mac *mac = &hdev->hw.mac;
1129 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1130 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1133 hclge_convert_setting_sr(mac, speed_ability);
1134 hclge_convert_setting_lr(mac, speed_ability);
1135 hclge_convert_setting_cr(mac, speed_ability);
1136 if (hdev->pdev->revision >= 0x21)
1137 hclge_convert_setting_fec(mac);
1139 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1140 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1141 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1144 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1147 struct hclge_mac *mac = &hdev->hw.mac;
1149 hclge_convert_setting_kr(mac, speed_ability);
1150 if (hdev->pdev->revision >= 0x21)
1151 hclge_convert_setting_fec(mac);
1152 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1153 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1154 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1157 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1160 unsigned long *supported = hdev->hw.mac.supported;
1162 /* default to supporting all speeds for a GE port */
1164 speed_ability = HCLGE_SUPPORT_GE;
1166 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1167 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1170 if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1171 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1173 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1177 if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1178 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1179 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1182 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1183 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1184 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1185 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1188 static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
1190 u8 media_type = hdev->hw.mac.media_type;
1192 if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1193 hclge_parse_fiber_link_mode(hdev, speed_ability);
1194 else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1195 hclge_parse_copper_link_mode(hdev, speed_ability);
1196 else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1197 hclge_parse_backplane_link_mode(hdev, speed_ability);
1200 static u32 hclge_get_max_speed(u8 speed_ability)
1202 if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1203 return HCLGE_MAC_SPEED_100G;
1205 if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1206 return HCLGE_MAC_SPEED_50G;
1208 if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1209 return HCLGE_MAC_SPEED_40G;
1211 if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1212 return HCLGE_MAC_SPEED_25G;
1214 if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1215 return HCLGE_MAC_SPEED_10G;
1217 if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1218 return HCLGE_MAC_SPEED_1G;
1220 if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1221 return HCLGE_MAC_SPEED_100M;
1223 if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1224 return HCLGE_MAC_SPEED_10M;
1226 return HCLGE_MAC_SPEED_1G;
1229 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1231 struct hclge_cfg_param_cmd *req;
1232 u64 mac_addr_tmp_high;
1236 req = (struct hclge_cfg_param_cmd *)desc[0].data;
1238 /* get the configuration */
1239 cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1242 cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1243 HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1244 cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1245 HCLGE_CFG_TQP_DESC_N_M,
1246 HCLGE_CFG_TQP_DESC_N_S);
1248 cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1249 HCLGE_CFG_PHY_ADDR_M,
1250 HCLGE_CFG_PHY_ADDR_S);
1251 cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1252 HCLGE_CFG_MEDIA_TP_M,
1253 HCLGE_CFG_MEDIA_TP_S);
1254 cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1255 HCLGE_CFG_RX_BUF_LEN_M,
1256 HCLGE_CFG_RX_BUF_LEN_S);
1257 /* get mac_address */
1258 mac_addr_tmp = __le32_to_cpu(req->param[2]);
1259 mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1260 HCLGE_CFG_MAC_ADDR_H_M,
1261 HCLGE_CFG_MAC_ADDR_H_S);
1263 mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1265 cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1266 HCLGE_CFG_DEFAULT_SPEED_M,
1267 HCLGE_CFG_DEFAULT_SPEED_S);
1268 cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1269 HCLGE_CFG_RSS_SIZE_M,
1270 HCLGE_CFG_RSS_SIZE_S);
1272 for (i = 0; i < ETH_ALEN; i++)
1273 cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
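/* Worked example of the address reconstruction above (illustrative values):
 * with param[2] = 0x33221100 and the MAC_ADDR_H field of param[3] = 0x5544,
 * mac_addr_tmp becomes 0x0000554433221100 and the byte loop yields
 * cfg->mac_addr = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, i.e. byte 0 of the
 * address sits in the lowest bits of the combined value.
 */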
1275 req = (struct hclge_cfg_param_cmd *)desc[1].data;
1276 cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1278 cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1279 HCLGE_CFG_SPEED_ABILITY_M,
1280 HCLGE_CFG_SPEED_ABILITY_S);
1281 cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1282 HCLGE_CFG_UMV_TBL_SPACE_M,
1283 HCLGE_CFG_UMV_TBL_SPACE_S);
1284 if (!cfg->umv_space)
1285 cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1288 /* hclge_get_cfg: query the static parameters from flash
1289 * @hdev: pointer to struct hclge_dev
1290 * @hcfg: the config structure to be filled
1292 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1294 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1295 struct hclge_cfg_param_cmd *req;
1299 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1302 req = (struct hclge_cfg_param_cmd *)desc[i].data;
1303 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1305 hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1306 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1307 /* Length must be in units of 4 bytes when sent to hardware */
1308 hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1309 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1310 req->offset = cpu_to_le32(offset);
1313 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1315 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1319 hclge_parse_cfg(hcfg, desc);
1324 static int hclge_get_cap(struct hclge_dev *hdev)
1328 ret = hclge_query_function_status(hdev);
1330 dev_err(&hdev->pdev->dev,
1331 "query function status error %d.\n", ret);
1335 /* get pf resource */
1336 ret = hclge_query_pf_resource(hdev);
1338 dev_err(&hdev->pdev->dev, "query pf resource error %d.\n", ret);
1343 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1345 #define HCLGE_MIN_TX_DESC 64
1346 #define HCLGE_MIN_RX_DESC 64
1348 if (!is_kdump_kernel())
1351 dev_info(&hdev->pdev->dev,
1352 "Running kdump kernel. Using minimal resources\n");
1354 /* the minimal number of queue pairs equals the number of vports */
1355 hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1356 hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1357 hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1360 static int hclge_configure(struct hclge_dev *hdev)
1362 struct hclge_cfg cfg;
1366 ret = hclge_get_cfg(hdev, &cfg);
1368 dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret);
1372 hdev->num_vmdq_vport = cfg.vmdq_vport_num;
1373 hdev->base_tqp_pid = 0;
1374 hdev->rss_size_max = cfg.rss_size_max;
1375 hdev->rx_buf_len = cfg.rx_buf_len;
1376 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1377 hdev->hw.mac.media_type = cfg.media_type;
1378 hdev->hw.mac.phy_addr = cfg.phy_addr;
1379 hdev->num_tx_desc = cfg.tqp_desc_num;
1380 hdev->num_rx_desc = cfg.tqp_desc_num;
1381 hdev->tm_info.num_pg = 1;
1382 hdev->tc_max = cfg.tc_num;
1383 hdev->tm_info.hw_pfc_map = 0;
1384 hdev->wanted_umv_size = cfg.umv_space;
1386 if (hnae3_dev_fd_supported(hdev)) {
1388 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1391 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1393 dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret);
1397 hclge_parse_link_mode(hdev, cfg.speed_ability);
1399 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1401 if ((hdev->tc_max > HNAE3_MAX_TC) ||
1402 (hdev->tc_max < 1)) {
1403 dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1408 /* Dev does not support DCB */
1409 if (!hnae3_dev_dcb_supported(hdev)) {
1413 hdev->pfc_max = hdev->tc_max;
1416 hdev->tm_info.num_tc = 1;
1418 /* Non-contiguous TCs are currently not supported */
1419 for (i = 0; i < hdev->tm_info.num_tc; i++)
1420 hnae3_set_bit(hdev->hw_tc_map, i, 1);
1422 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1424 hclge_init_kdump_kernel_config(hdev);
1426 /* Set the initial affinity based on the PCI function number */
1427 i = cpumask_weight(cpumask_of_node(dev_to_node(&hdev->pdev->dev)));
1428 i = i ? PCI_FUNC(hdev->pdev->devfn) % i : 0;
1429 cpumask_set_cpu(cpumask_local_spread(i, dev_to_node(&hdev->pdev->dev)),
1430 &hdev->affinity_mask);
1435 static int hclge_config_tso(struct hclge_dev *hdev, unsigned int tso_mss_min,
1436 unsigned int tso_mss_max)
1438 struct hclge_cfg_tso_status_cmd *req;
1439 struct hclge_desc desc;
1442 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1444 req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1447 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1448 HCLGE_TSO_MSS_MIN_S, tso_mss_min);
1449 req->tso_mss_min = cpu_to_le16(tso_mss);
1452 hnae3_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
1453 HCLGE_TSO_MSS_MIN_S, tso_mss_max);
1454 req->tso_mss_max = cpu_to_le16(tso_mss);
1456 return hclge_cmd_send(&hdev->hw, &desc, 1);
1459 static int hclge_config_gro(struct hclge_dev *hdev, bool en)
1461 struct hclge_cfg_gro_status_cmd *req;
1462 struct hclge_desc desc;
1465 if (!hnae3_dev_gro_supported(hdev))
1468 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1469 req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1471 req->gro_en = cpu_to_le16(en ? 1 : 0);
1473 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1475 dev_err(&hdev->pdev->dev,
1476 "GRO hardware config cmd failed, ret = %d\n", ret);
1481 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1483 struct hclge_tqp *tqp;
1486 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1487 sizeof(struct hclge_tqp), GFP_KERNEL);
1493 for (i = 0; i < hdev->num_tqps; i++) {
1494 tqp->dev = &hdev->pdev->dev;
1497 tqp->q.ae_algo = &ae_algo;
1498 tqp->q.buf_size = hdev->rx_buf_len;
1499 tqp->q.tx_desc_num = hdev->num_tx_desc;
1500 tqp->q.rx_desc_num = hdev->num_rx_desc;
1501 tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
1502 i * HCLGE_TQP_REG_SIZE;
1510 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1511 u16 tqp_pid, u16 tqp_vid, bool is_pf)
1513 struct hclge_tqp_map_cmd *req;
1514 struct hclge_desc desc;
1517 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1519 req = (struct hclge_tqp_map_cmd *)desc.data;
1520 req->tqp_id = cpu_to_le16(tqp_pid);
1521 req->tqp_vf = func_id;
1522 req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1524 req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1525 req->tqp_vid = cpu_to_le16(tqp_vid);
1527 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1529 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1534 static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1536 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1537 struct hclge_dev *hdev = vport->back;
1540 for (i = 0, alloced = 0; i < hdev->num_tqps &&
1541 alloced < num_tqps; i++) {
1542 if (!hdev->htqp[i].alloced) {
1543 hdev->htqp[i].q.handle = &vport->nic;
1544 hdev->htqp[i].q.tqp_index = alloced;
1545 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1546 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1547 kinfo->tqp[alloced] = &hdev->htqp[i].q;
1548 hdev->htqp[i].alloced = true;
1552 vport->alloc_tqps = alloced;
1553 kinfo->rss_size = min_t(u16, hdev->rss_size_max,
1554 vport->alloc_tqps / hdev->tm_info.num_tc);
1556 /* ensure a one-to-one mapping between IRQ and queue by default */
1557 kinfo->rss_size = min_t(u16, kinfo->rss_size,
1558 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
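/* Worked example with hypothetical numbers: alloc_tqps = 16, num_tc = 4,
 * rss_size_max = 8 and num_nic_msi = 9 give rss_size = min(8, 16 / 4) = 4,
 * then min(4, (9 - 1) / 4) = 2, so the 2 * 4 = 8 resulting queues fit the
 * remaining vectors one-to-one (one vector is held back from the mapping).
 */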
1563 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1564 u16 num_tx_desc, u16 num_rx_desc)
1567 struct hnae3_handle *nic = &vport->nic;
1568 struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1569 struct hclge_dev *hdev = vport->back;
1572 kinfo->num_tx_desc = num_tx_desc;
1573 kinfo->num_rx_desc = num_rx_desc;
1575 kinfo->rx_buf_len = hdev->rx_buf_len;
1577 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1578 sizeof(struct hnae3_queue *), GFP_KERNEL);
1582 ret = hclge_assign_tqp(vport, num_tqps);
1584 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1589 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1590 struct hclge_vport *vport)
1592 struct hnae3_handle *nic = &vport->nic;
1593 struct hnae3_knic_private_info *kinfo;
1596 kinfo = &nic->kinfo;
1597 for (i = 0; i < vport->alloc_tqps; i++) {
1598 struct hclge_tqp *q =
1599 container_of(kinfo->tqp[i], struct hclge_tqp, q);
1603 is_pf = !(vport->vport_id);
1604 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1613 static int hclge_map_tqp(struct hclge_dev *hdev)
1615 struct hclge_vport *vport = hdev->vport;
1618 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1619 for (i = 0; i < num_vport; i++) {
1622 ret = hclge_map_tqp_to_vport(hdev, vport);
1632 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1634 struct hnae3_handle *nic = &vport->nic;
1635 struct hclge_dev *hdev = vport->back;
1638 nic->pdev = hdev->pdev;
1639 nic->ae_algo = &ae_algo;
1640 nic->numa_node_mask = hdev->numa_node_mask;
1642 ret = hclge_knic_setup(vport, num_tqps,
1643 hdev->num_tx_desc, hdev->num_rx_desc);
1645 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1650 static int hclge_alloc_vport(struct hclge_dev *hdev)
1652 struct pci_dev *pdev = hdev->pdev;
1653 struct hclge_vport *vport;
1659 /* We need to alloc a vport for the main NIC of the PF */
1660 num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
1662 if (hdev->num_tqps < num_vport) {
1663 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1664 hdev->num_tqps, num_vport);
1668 /* Alloc the same number of TQPs for every vport */
1669 tqp_per_vport = hdev->num_tqps / num_vport;
1670 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
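/* Illustrative split (hypothetical numbers): with num_tqps = 64 and
 * num_vport = 9, tqp_per_vport = 7 and the main vport gets 7 + 64 % 9 = 8,
 * i.e. the remainder of the division goes to the PF's own vport.
 */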
1672 vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1677 hdev->vport = vport;
1678 hdev->num_alloc_vport = num_vport;
1680 if (IS_ENABLED(CONFIG_PCI_IOV))
1681 hdev->num_alloc_vfs = hdev->num_req_vfs;
1683 for (i = 0; i < num_vport; i++) {
1685 vport->vport_id = i;
1686 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1687 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1688 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1689 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1690 INIT_LIST_HEAD(&vport->vlan_list);
1691 INIT_LIST_HEAD(&vport->uc_mac_list);
1692 INIT_LIST_HEAD(&vport->mc_mac_list);
1695 ret = hclge_vport_setup(vport, tqp_main_vport);
1697 ret = hclge_vport_setup(vport, tqp_per_vport);
1700 "vport setup failed for vport %d, %d\n",
1711 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1712 struct hclge_pkt_buf_alloc *buf_alloc)
1714 /* TX buffer size is in units of 128 bytes */
1715 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1716 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
1717 struct hclge_tx_buff_alloc_cmd *req;
1718 struct hclge_desc desc;
1722 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1724 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1725 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1726 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1728 req->tx_pkt_buff[i] =
1729 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1730 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
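/* Encoding sketch with a hypothetical size: a 16 KiB (0x4000) TC buffer is
 * sent as (0x4000 >> HCLGE_BUF_SIZE_UNIT_SHIFT) | HCLGE_BUF_SIZE_UPDATE_EN_MSK
 * = 0x80 | 0x8000 = 0x8080, i.e. the size in 128-byte units with bit 15
 * flagging the update.
 */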
1733 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1735 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1741 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1742 struct hclge_pkt_buf_alloc *buf_alloc)
1744 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1747 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1752 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1757 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1758 if (hdev->hw_tc_map & BIT(i))
1763 /* Get the number of PFC-enabled TCs, which have a private buffer */
1764 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1765 struct hclge_pkt_buf_alloc *buf_alloc)
1767 struct hclge_priv_buf *priv;
1771 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1772 priv = &buf_alloc->priv_buf[i];
1773 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1781 /* Get the number of PFC-disabled TCs, which have a private buffer */
1782 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1783 struct hclge_pkt_buf_alloc *buf_alloc)
1785 struct hclge_priv_buf *priv;
1789 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1790 priv = &buf_alloc->priv_buf[i];
1791 if (hdev->hw_tc_map & BIT(i) &&
1792 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1800 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1802 struct hclge_priv_buf *priv;
1806 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1807 priv = &buf_alloc->priv_buf[i];
1809 rx_priv += priv->buf_size;
1814 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1816 u32 i, total_tx_size = 0;
1818 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1819 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1821 return total_tx_size;
1824 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1825 struct hclge_pkt_buf_alloc *buf_alloc,
1828 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1829 u32 tc_num = hclge_get_tc_num(hdev);
1830 u32 shared_buf, aligned_mps;
1834 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1836 if (hnae3_dev_dcb_supported(hdev))
1837 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1840 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1841 + hdev->dv_buf_size;
1843 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1844 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1845 HCLGE_BUF_SIZE_UNIT);
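/* Illustrative arithmetic: with tc_num = 4 and aligned_mps = 1536 (a
 * 1500-byte MPS rounded up to the 256-byte unit), shared_buf_tc =
 * 4 * 1536 + 1536 = 7680; shared_std is then the larger of shared_buf_min
 * and shared_buf_tc, rounded up to the 256-byte unit.
 */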
1847 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1848 if (rx_all < rx_priv + shared_std)
1851 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1852 buf_alloc->s_buf.buf_size = shared_buf;
1853 if (hnae3_dev_dcb_supported(hdev)) {
1854 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1855 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1856 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1857 HCLGE_BUF_SIZE_UNIT);
1859 buf_alloc->s_buf.self.high = aligned_mps +
1860 HCLGE_NON_DCB_ADDITIONAL_BUF;
1861 buf_alloc->s_buf.self.low = aligned_mps;
1864 if (hnae3_dev_dcb_supported(hdev)) {
1865 hi_thrd = shared_buf - hdev->dv_buf_size;
1867 if (tc_num <= NEED_RESERVE_TC_NUM)
1868 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1872 hi_thrd = hi_thrd / tc_num;
1874 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1875 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1876 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1878 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
1879 lo_thrd = aligned_mps;
1882 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1883 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
1884 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
1890 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
1891 struct hclge_pkt_buf_alloc *buf_alloc)
1895 total_size = hdev->pkt_buf_size;
1897 /* alloc tx buffer for all enabled tc */
1898 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1899 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1901 if (hdev->hw_tc_map & BIT(i)) {
1902 if (total_size < hdev->tx_buf_size)
1905 priv->tx_buf_size = hdev->tx_buf_size;
1907 priv->tx_buf_size = 0;
1910 total_size -= priv->tx_buf_size;
1916 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
1917 struct hclge_pkt_buf_alloc *buf_alloc)
1919 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1920 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1923 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1924 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1931 if (!(hdev->hw_tc_map & BIT(i)))
1936 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1937 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
1938 priv->wl.high = roundup(priv->wl.low + aligned_mps,
1939 HCLGE_BUF_SIZE_UNIT);
1942 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
1946 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
1949 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1952 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
1953 struct hclge_pkt_buf_alloc *buf_alloc)
1955 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1956 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
1959 /* let the last one be cleared first */
1960 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1961 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1962 unsigned int mask = BIT((unsigned int)i);
1964 if (hdev->hw_tc_map & mask &&
1965 !(hdev->tm_info.hw_pfc_map & mask)) {
1966 /* Clear the private buffer of the non-PFC TC */
1974 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
1975 no_pfc_priv_num == 0)
1979 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
1982 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
1983 struct hclge_pkt_buf_alloc *buf_alloc)
1985 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
1986 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
1989 /* let the last one be cleared first */
1990 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
1991 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
1992 unsigned int mask = BIT((unsigned int)i);
1994 if (hdev->hw_tc_map & mask &&
1995 hdev->tm_info.hw_pfc_map & mask) {
1996 /* Reduce the number of PFC TCs with a private buffer */
2004 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2009 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2012 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2013 struct hclge_pkt_buf_alloc *buf_alloc)
2015 #define COMPENSATE_BUFFER 0x3C00
2016 #define COMPENSATE_HALF_MPS_NUM 5
2017 #define PRIV_WL_GAP 0x1800
2019 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2020 u32 tc_num = hclge_get_tc_num(hdev);
2021 u32 half_mps = hdev->mps >> 1;
2026 rx_priv = rx_priv / tc_num;
2028 if (tc_num <= NEED_RESERVE_TC_NUM)
2029 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2031 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2032 COMPENSATE_HALF_MPS_NUM * half_mps;
2033 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2034 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2036 if (rx_priv < min_rx_priv)
2039 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2040 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2047 if (!(hdev->hw_tc_map & BIT(i)))
2051 priv->buf_size = rx_priv;
2052 priv->wl.high = rx_priv - hdev->dv_buf_size;
2053 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2056 buf_alloc->s_buf.buf_size = 0;
2061 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2062 * @hdev: pointer to struct hclge_dev
2063 * @buf_alloc: pointer to buffer calculation data
2064 * @return: 0: calculation successful, negative: fail
2066 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2067 struct hclge_pkt_buf_alloc *buf_alloc)
2069 /* When DCB is not supported, rx private buffer is not allocated. */
2070 if (!hnae3_dev_dcb_supported(hdev)) {
2071 u32 rx_all = hdev->pkt_buf_size;
2073 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2074 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2080 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2083 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2086 /* try to decrease the buffer size */
2087 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2090 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2093 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2099 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2100 struct hclge_pkt_buf_alloc *buf_alloc)
2102 struct hclge_rx_priv_buff_cmd *req;
2103 struct hclge_desc desc;
2107 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2108 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2110 /* Alloc private buffer TCs */
2111 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2112 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2115 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2117 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2121 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2122 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2124 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2126 dev_err(&hdev->pdev->dev,
2127 "rx private buffer alloc cmd failed %d\n", ret);
2132 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2133 struct hclge_pkt_buf_alloc *buf_alloc)
2135 struct hclge_rx_priv_wl_buf *req;
2136 struct hclge_priv_buf *priv;
2137 struct hclge_desc desc[2];
2141 for (i = 0; i < 2; i++) {
2142 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2144 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2146 /* The first descriptor sets the NEXT bit to 1 */
2148 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2150 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2152 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2153 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2155 priv = &buf_alloc->priv_buf[idx];
2156 req->tc_wl[j].high =
2157 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2158 req->tc_wl[j].high |=
2159 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2161 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2162 req->tc_wl[j].low |=
2163 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
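/* Indexing sketch: assuming HCLGE_TC_NUM_ONE_DESC is 4 (two descriptors
 * covering the HCLGE_MAX_TC_NUM TCs), descriptor 0 carries the watermarks
 * for TC0-TC3 and descriptor 1 those for TC4-TC7.
 */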
2167 /* Send 2 descriptors at one time */
2168 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2170 dev_err(&hdev->pdev->dev,
2171 "rx private waterline config cmd failed %d\n",
2176 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2177 struct hclge_pkt_buf_alloc *buf_alloc)
2179 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2180 struct hclge_rx_com_thrd *req;
2181 struct hclge_desc desc[2];
2182 struct hclge_tc_thrd *tc;
2186 for (i = 0; i < 2; i++) {
2187 hclge_cmd_setup_basic_desc(&desc[i],
2188 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2189 req = (struct hclge_rx_com_thrd *)&desc[i].data;
2191 /* The first descriptor sets the NEXT bit to 1 */
2193 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2195 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
2197 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2198 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2200 req->com_thrd[j].high =
2201 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2202 req->com_thrd[j].high |=
2203 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2204 req->com_thrd[j].low =
2205 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2206 req->com_thrd[j].low |=
2207 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2211 /* Send 2 descriptors at one time */
2212 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2214 dev_err(&hdev->pdev->dev,
2215 "common threshold config cmd failed %d\n", ret);
2219 static int hclge_common_wl_config(struct hclge_dev *hdev,
2220 struct hclge_pkt_buf_alloc *buf_alloc)
2222 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2223 struct hclge_rx_com_wl *req;
2224 struct hclge_desc desc;
2227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2229 req = (struct hclge_rx_com_wl *)desc.data;
2230 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2231 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2233 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2234 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2236 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2238 dev_err(&hdev->pdev->dev,
2239 "common waterline config cmd failed %d\n", ret);
2244 int hclge_buffer_alloc(struct hclge_dev *hdev)
2246 struct hclge_pkt_buf_alloc *pkt_buf;
2249 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2253 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2255 dev_err(&hdev->pdev->dev,
2256 "could not calc tx buffer size for all TCs %d\n", ret);
2260 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2262 dev_err(&hdev->pdev->dev,
2263 "could not alloc tx buffers %d\n", ret);
2267 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2269 dev_err(&hdev->pdev->dev,
2270 "could not calc rx priv buffer size for all TCs %d\n",
2275 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2277 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2282 if (hnae3_dev_dcb_supported(hdev)) {
2283 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2285 dev_err(&hdev->pdev->dev,
2286 "could not configure rx private waterline %d\n",
2291 ret = hclge_common_thrd_config(hdev, pkt_buf);
2293 dev_err(&hdev->pdev->dev,
2294 "could not configure common threshold %d\n",
2300 ret = hclge_common_wl_config(hdev, pkt_buf);
2302 dev_err(&hdev->pdev->dev,
2303 "could not configure common waterline %d\n", ret);
2310 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2312 struct hnae3_handle *roce = &vport->roce;
2313 struct hnae3_handle *nic = &vport->nic;
2315 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2317 if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
2318 vport->back->num_msi_left == 0)
2321 roce->rinfo.base_vector = vport->back->roce_base_vector;
2323 roce->rinfo.netdev = nic->kinfo.netdev;
2324 roce->rinfo.roce_io_base = vport->back->hw.io_base;
2326 roce->pdev = nic->pdev;
2327 roce->ae_algo = nic->ae_algo;
2328 roce->numa_node_mask = nic->numa_node_mask;
2333 static int hclge_init_msi(struct hclge_dev *hdev)
2335 struct pci_dev *pdev = hdev->pdev;
2339 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2341 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2344 "failed(%d) to allocate MSI/MSI-X vectors\n",
2348 if (vectors < hdev->num_msi)
2349 dev_warn(&hdev->pdev->dev,
2350 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2351 hdev->num_msi, vectors);
2353 hdev->num_msi = vectors;
2354 hdev->num_msi_left = vectors;
2356 hdev->base_msi_vector = pdev->irq;
2357 hdev->roce_base_vector = hdev->base_msi_vector +
2358 hdev->roce_base_msix_offset;
2360 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2361 sizeof(u16), GFP_KERNEL);
2362 if (!hdev->vector_status) {
2363 pci_free_irq_vectors(pdev);
2367 for (i = 0; i < hdev->num_msi; i++)
2368 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2370 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2371 sizeof(int), GFP_KERNEL);
2372 if (!hdev->vector_irq) {
2373 pci_free_irq_vectors(pdev);
2380 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2382 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2383 duplex = HCLGE_MAC_FULL;
2388 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2391 struct hclge_config_mac_speed_dup_cmd *req;
2392 struct hclge_desc desc;
2395 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2397 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2400 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
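/* firmware speed encoding used by the switch below:
 * 1G = 0, 10G = 1, 25G = 2, 40G = 3, 50G = 4, 100G = 5, 10M = 6, 100M = 7
 */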
2403 case HCLGE_MAC_SPEED_10M:
2404 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2405 HCLGE_CFG_SPEED_S, 6);
2407 case HCLGE_MAC_SPEED_100M:
2408 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2409 HCLGE_CFG_SPEED_S, 7);
2411 case HCLGE_MAC_SPEED_1G:
2412 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2413 HCLGE_CFG_SPEED_S, 0);
2415 case HCLGE_MAC_SPEED_10G:
2416 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2417 HCLGE_CFG_SPEED_S, 1);
2419 case HCLGE_MAC_SPEED_25G:
2420 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2421 HCLGE_CFG_SPEED_S, 2);
2423 case HCLGE_MAC_SPEED_40G:
2424 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2425 HCLGE_CFG_SPEED_S, 3);
2427 case HCLGE_MAC_SPEED_50G:
2428 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2429 HCLGE_CFG_SPEED_S, 4);
2431 case HCLGE_MAC_SPEED_100G:
2432 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
2433 HCLGE_CFG_SPEED_S, 5);
2436 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2440 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2443 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2445 dev_err(&hdev->pdev->dev,
2446 "mac speed/duplex config cmd failed %d.\n", ret);
2453 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2457 duplex = hclge_check_speed_dup(duplex, speed);
2458 if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
2461 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2465 hdev->hw.mac.speed = speed;
2466 hdev->hw.mac.duplex = duplex;
2471 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2474 struct hclge_vport *vport = hclge_get_vport(handle);
2475 struct hclge_dev *hdev = vport->back;
2477 return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2480 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2482 struct hclge_config_auto_neg_cmd *req;
2483 struct hclge_desc desc;
2487 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2489 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2491 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2492 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2494 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2496 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2502 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2504 struct hclge_vport *vport = hclge_get_vport(handle);
2505 struct hclge_dev *hdev = vport->back;
2507 if (!hdev->hw.mac.support_autoneg) {
2509 dev_err(&hdev->pdev->dev,
2510 "autoneg is not supported by current port\n");
2517 return hclge_set_autoneg_en(hdev, enable);
2520 static int hclge_get_autoneg(struct hnae3_handle *handle)
2522 struct hclge_vport *vport = hclge_get_vport(handle);
2523 struct hclge_dev *hdev = vport->back;
2524 struct phy_device *phydev = hdev->hw.mac.phydev;
2527 return phydev->autoneg;
2529 return hdev->hw.mac.autoneg;
2532 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2534 struct hclge_vport *vport = hclge_get_vport(handle);
2535 struct hclge_dev *hdev = vport->back;
2538 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2540 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2543 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2546 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2548 struct hclge_vport *vport = hclge_get_vport(handle);
2549 struct hclge_dev *hdev = vport->back;
2551 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2552 return hclge_set_autoneg_en(hdev, !halt);
2557 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2559 struct hclge_config_fec_cmd *req;
2560 struct hclge_desc desc;
2563 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2565 req = (struct hclge_config_fec_cmd *)desc.data;
2566 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2567 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2568 if (fec_mode & BIT(HNAE3_FEC_RS))
2569 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2570 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2571 if (fec_mode & BIT(HNAE3_FEC_BASER))
2572 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2573 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2575 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2577 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2582 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2584 struct hclge_vport *vport = hclge_get_vport(handle);
2585 struct hclge_dev *hdev = vport->back;
2586 struct hclge_mac *mac = &hdev->hw.mac;
2589 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2590 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2594 ret = hclge_set_fec_hw(hdev, fec_mode);
2598 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2602 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2605 struct hclge_vport *vport = hclge_get_vport(handle);
2606 struct hclge_dev *hdev = vport->back;
2607 struct hclge_mac *mac = &hdev->hw.mac;
2610 *fec_ability = mac->fec_ability;
2612 *fec_mode = mac->fec_mode;
2615 static int hclge_mac_init(struct hclge_dev *hdev)
2617 struct hclge_mac *mac = &hdev->hw.mac;
2620 hdev->support_sfp_query = true;
2621 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2622 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2623 hdev->hw.mac.duplex);
2625 dev_err(&hdev->pdev->dev,
2626 "Config mac speed dup fail ret=%d\n", ret);
2630 if (hdev->hw.mac.support_autoneg) {
2631 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2633 dev_err(&hdev->pdev->dev,
2634 "Config mac autoneg fail ret=%d\n", ret);
2641 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2642 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2644 dev_err(&hdev->pdev->dev,
2645 "Fec mode init fail, ret = %d\n", ret);
2650 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2652 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2656 ret = hclge_set_default_loopback(hdev);
2660 ret = hclge_buffer_alloc(hdev);
2662 dev_err(&hdev->pdev->dev,
2663 "allocate buffer fail, ret=%d\n", ret);
2668 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2670 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2671 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
2672 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2673 hclge_wq, &hdev->service_task, 0);
2676 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2678 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2679 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
2680 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2681 hclge_wq, &hdev->service_task, 0);
2684 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2686 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state))
2687 mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
2688 hclge_wq, &hdev->service_task,
2692 static int hclge_get_mac_link_status(struct hclge_dev *hdev)
2694 struct hclge_link_status_cmd *req;
2695 struct hclge_desc desc;
2699 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2700 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2702 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2707 req = (struct hclge_link_status_cmd *)desc.data;
2708 link_status = req->status & HCLGE_LINK_STATUS_UP_M;
2710 return !!link_status;
2713 static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
2715 unsigned int mac_state;
2718 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2721 mac_state = hclge_get_mac_link_status(hdev);
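/* for a port with an attached PHY, the link state reported below is the
 * MAC link ANDed with the PHY link while the PHY is running; otherwise
 * the MAC link status is used directly
 */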
2723 if (hdev->hw.mac.phydev) {
2724 if (hdev->hw.mac.phydev->state == PHY_RUNNING)
2725 link_stat = mac_state &
2726 hdev->hw.mac.phydev->link;
2731 link_stat = mac_state;
2737 static void hclge_update_link_status(struct hclge_dev *hdev)
2739 struct hnae3_client *rclient = hdev->roce_client;
2740 struct hnae3_client *client = hdev->nic_client;
2741 struct hnae3_handle *rhandle;
2742 struct hnae3_handle *handle;
2749 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2752 state = hclge_get_mac_phy_link(hdev);
2753 if (state != hdev->hw.mac.link) {
2754 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
2755 handle = &hdev->vport[i].nic;
2756 client->ops->link_status_change(handle, state);
2757 hclge_config_mac_tnl_int(hdev, state);
2758 rhandle = &hdev->vport[i].roce;
2759 if (rclient && rclient->ops->link_status_change)
2760 rclient->ops->link_status_change(rhandle,
2763 hdev->hw.mac.link = state;
2766 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2769 static void hclge_update_port_capability(struct hclge_mac *mac)
2771 /* update fec ability by speed */
2772 hclge_convert_setting_fec(mac);
2774 /* firmware cannot identify the backplane type, the media type
2775 * read from the configuration can help to deal with it
2777 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
2778 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
2779 mac->module_type = HNAE3_MODULE_TYPE_KR;
2780 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2781 mac->module_type = HNAE3_MODULE_TYPE_TP;
2783 if (mac->support_autoneg) {
2784 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
2785 linkmode_copy(mac->advertising, mac->supported);
2787 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
2789 linkmode_zero(mac->advertising);
2793 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
2795 struct hclge_sfp_info_cmd *resp;
2796 struct hclge_desc desc;
2799 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2800 resp = (struct hclge_sfp_info_cmd *)desc.data;
2801 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2802 if (ret == -EOPNOTSUPP) {
2803 dev_warn(&hdev->pdev->dev,
2804 "IMP do not support get SFP speed %d\n", ret);
2807 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
2811 *speed = le32_to_cpu(resp->speed);
2816 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
2818 struct hclge_sfp_info_cmd *resp;
2819 struct hclge_desc desc;
2822 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
2823 resp = (struct hclge_sfp_info_cmd *)desc.data;
2825 resp->query_type = QUERY_ACTIVE_SPEED;
2827 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2828 if (ret == -EOPNOTSUPP) {
2829 dev_warn(&hdev->pdev->dev,
2830 "IMP does not support get SFP info %d\n", ret);
2833 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
2837 mac->speed = le32_to_cpu(resp->speed);
2838 /* if resp->speed_ability is 0, it means the firmware is an old
2839 * version, so do not update these params
2841 if (resp->speed_ability) {
2842 mac->module_type = le32_to_cpu(resp->module_type);
2843 mac->speed_ability = le32_to_cpu(resp->speed_ability);
2844 mac->autoneg = resp->autoneg;
2845 mac->support_autoneg = resp->autoneg_ability;
2846 mac->speed_type = QUERY_ACTIVE_SPEED;
2847 if (!resp->active_fec)
2850 mac->fec_mode = BIT(resp->active_fec);
2852 mac->speed_type = QUERY_SFP_SPEED;
2858 static int hclge_update_port_info(struct hclge_dev *hdev)
2860 struct hclge_mac *mac = &hdev->hw.mac;
2861 int speed = HCLGE_MAC_SPEED_UNKNOWN;
2864 /* get the port info from SFP cmd if not copper port */
2865 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
2868 /* if IMP does not support get SFP/qSFP info, return directly */
2869 if (!hdev->support_sfp_query)
2872 if (hdev->pdev->revision >= 0x21)
2873 ret = hclge_get_sfp_info(hdev, mac);
2875 ret = hclge_get_sfp_speed(hdev, &speed);
2877 if (ret == -EOPNOTSUPP) {
2878 hdev->support_sfp_query = false;
2884 if (hdev->pdev->revision >= 0x21) {
2885 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
2886 hclge_update_port_capability(mac);
2889 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
2892 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
2893 return 0; /* do nothing if no SFP */
2895 /* must config full duplex for SFP */
2896 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
2900 static int hclge_get_status(struct hnae3_handle *handle)
2902 struct hclge_vport *vport = hclge_get_vport(handle);
2903 struct hclge_dev *hdev = vport->back;
2905 hclge_update_link_status(hdev);
2907 return hdev->hw.mac.link;
2910 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
2912 if (pci_num_vf(hdev->pdev) == 0) {
2913 dev_err(&hdev->pdev->dev,
2914 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
2918 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
2919 dev_err(&hdev->pdev->dev,
2920 "vf id(%d) is out of range(0 <= vfid < %d)\n",
2921 vf, pci_num_vf(hdev->pdev));
2925 /* VFs start from 1 in vport */
2926 vf += HCLGE_VF_VPORT_START_NUM;
2927 return &hdev->vport[vf];
2930 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
2931 struct ifla_vf_info *ivf)
2933 struct hclge_vport *vport = hclge_get_vport(handle);
2934 struct hclge_dev *hdev = vport->back;
2936 vport = hclge_get_vf_vport(hdev, vf);
2941 ivf->linkstate = vport->vf_info.link_state;
2942 ivf->spoofchk = vport->vf_info.spoofchk;
2943 ivf->trusted = vport->vf_info.trusted;
2944 ivf->min_tx_rate = 0;
2945 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
2946 ether_addr_copy(ivf->mac, vport->vf_info.mac);
2951 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
2954 struct hclge_vport *vport = hclge_get_vport(handle);
2955 struct hclge_dev *hdev = vport->back;
2957 vport = hclge_get_vf_vport(hdev, vf);
2961 vport->vf_info.link_state = link_state;
2966 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
2968 u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
2970 /* fetch the events from their corresponding regs */
2971 rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
2972 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
2973 msix_src_reg = hclge_read_dev(&hdev->hw,
2974 HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
2976 /* Assumption: If by any chance reset and mailbox events are reported
2977 * together, then we will only process the reset event in this pass and will
2978 * defer the processing of the mailbox events. Since we would not have
2979 * cleared the RX CMDQ event this time, we would receive yet another
2980 * interrupt from H/W just for the mailbox.
2982 * check for vector0 reset event sources
2984 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
2985 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
2986 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
2987 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2988 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
2989 hdev->rst_stats.imp_rst_cnt++;
2990 return HCLGE_VECTOR0_EVENT_RST;
2993 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
2994 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
2995 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
2996 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
2997 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
2998 hdev->rst_stats.global_rst_cnt++;
2999 return HCLGE_VECTOR0_EVENT_RST;
3002 /* check for vector0 msix event source */
3003 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
3004 dev_info(&hdev->pdev->dev, "received event 0x%x\n",
3006 *clearval = msix_src_reg;
3007 return HCLGE_VECTOR0_EVENT_ERR;
3010 /* check for vector0 mailbox(=CMDQ RX) event source */
3011 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3012 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3013 *clearval = cmdq_src_reg;
3014 return HCLGE_VECTOR0_EVENT_MBX;
3017 /* print other vector0 event source */
3018 dev_info(&hdev->pdev->dev,
3019 "CMDQ INT status:0x%x, other INT status:0x%x\n",
3020 cmdq_src_reg, msix_src_reg);
3021 *clearval = msix_src_reg;
3023 return HCLGE_VECTOR0_EVENT_OTHER;
3026 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3029 switch (event_type) {
3030 case HCLGE_VECTOR0_EVENT_RST:
3031 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3033 case HCLGE_VECTOR0_EVENT_MBX:
3034 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3041 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3043 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3044 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3045 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3046 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3047 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3050 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3052 writel(enable ? 1 : 0, vector->addr);
3055 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3057 struct hclge_dev *hdev = data;
3061 hclge_enable_vector(&hdev->misc_vector, false);
3062 event_cause = hclge_check_event_cause(hdev, &clearval);
3064 /* vector 0 interrupt is shared with reset and mailbox source events. */
3065 switch (event_cause) {
3066 case HCLGE_VECTOR0_EVENT_ERR:
3067 /* we do not know what type of reset is required now. This could
3068 * only be decided after we fetch the type of errors which
3069 * caused this event. Therefore, we will do the following for now:
3070 * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
3071 * have deferred the type of reset to be used.
3072 * 2. Schedule the reset service task.
3073 * 3. When the service task receives HNAE3_UNKNOWN_RESET type it
3074 * will fetch the correct type of reset. This would be done
3075 * by first decoding the types of errors.
3077 set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
3079 case HCLGE_VECTOR0_EVENT_RST:
3080 hclge_reset_task_schedule(hdev);
3082 case HCLGE_VECTOR0_EVENT_MBX:
3083 /* If we are here then,
3084 * 1. Either we are not handling any mbx task and we are not
3087 * 2. We could be handling a mbx task but nothing more is
3089 * In both cases, we should schedule mbx task as there are more
3090 * mbx messages reported by this interrupt.
3092 hclge_mbx_task_schedule(hdev);
3095 dev_warn(&hdev->pdev->dev,
3096 "received unknown or unhandled event of vector0\n");
3100 hclge_clear_event_cause(hdev, event_cause, clearval);
3102 /* Enable interrupt if it is not caused by reset. And when
3103 * clearval is equal to 0, it means the interrupt status may be
3104 * cleared by hardware before the driver reads the status register.
3105 * For this case, vector0 interrupt should also be enabled.
3108 event_cause == HCLGE_VECTOR0_EVENT_MBX) {
3109 hclge_enable_vector(&hdev->misc_vector, true);
3115 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3117 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3118 dev_warn(&hdev->pdev->dev,
3119 "vector(vector_id %d) has been freed.\n", vector_id);
3123 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3124 hdev->num_msi_left += 1;
3125 hdev->num_msi_used -= 1;
3128 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3130 struct hclge_misc_vector *vector = &hdev->misc_vector;
3132 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3134 vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3135 hdev->vector_status[0] = 0;
3137 hdev->num_msi_left -= 1;
3138 hdev->num_msi_used += 1;
3141 static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
3142 const cpumask_t *mask)
3144 struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
3147 cpumask_copy(&hdev->affinity_mask, mask);
3150 static void hclge_irq_affinity_release(struct kref *ref)
3154 static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
3156 irq_set_affinity_hint(hdev->misc_vector.vector_irq,
3157 &hdev->affinity_mask);
3159 hdev->affinity_notify.notify = hclge_irq_affinity_notify;
3160 hdev->affinity_notify.release = hclge_irq_affinity_release;
3161 irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
3162 &hdev->affinity_notify);
3165 static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
3167 irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
3168 irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
3171 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3175 hclge_get_misc_vector(hdev);
3177 /* this would be explicitly freed in the end */
3178 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3179 0, "hclge_misc", hdev);
3181 hclge_free_vector(hdev, 0);
3182 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3183 hdev->misc_vector.vector_irq);
3189 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3191 free_irq(hdev->misc_vector.vector_irq, hdev);
3192 hclge_free_vector(hdev, 0);
3195 int hclge_notify_client(struct hclge_dev *hdev,
3196 enum hnae3_reset_notify_type type)
3198 struct hnae3_client *client = hdev->nic_client;
3201 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3204 if (!client->ops->reset_notify)
3207 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3208 struct hnae3_handle *handle = &hdev->vport[i].nic;
3211 ret = client->ops->reset_notify(handle, type);
3213 dev_err(&hdev->pdev->dev,
3214 "notify nic client failed %d(%d)\n", type, ret);
3222 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3223 enum hnae3_reset_notify_type type)
3225 struct hnae3_client *client = hdev->roce_client;
3229 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3232 if (!client->ops->reset_notify)
3235 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
3236 struct hnae3_handle *handle = &hdev->vport[i].roce;
3238 ret = client->ops->reset_notify(handle, type);
3240 dev_err(&hdev->pdev->dev,
3241 "notify roce client failed %d(%d)",
3250 static int hclge_reset_wait(struct hclge_dev *hdev)
3252 #define HCLGE_RESET_WATI_MS 100
3253 #define HCLGE_RESET_WAIT_CNT 200
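/* worst case the loop below polls HCLGE_RESET_WAIT_CNT times with
 * HCLGE_RESET_WATI_MS sleeps, i.e. 200 * 100 ms = 20 seconds
 */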
3254 u32 val, reg, reg_bit;
3257 switch (hdev->reset_type) {
3258 case HNAE3_IMP_RESET:
3259 reg = HCLGE_GLOBAL_RESET_REG;
3260 reg_bit = HCLGE_IMP_RESET_BIT;
3262 case HNAE3_GLOBAL_RESET:
3263 reg = HCLGE_GLOBAL_RESET_REG;
3264 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3266 case HNAE3_FUNC_RESET:
3267 reg = HCLGE_FUN_RST_ING;
3268 reg_bit = HCLGE_FUN_RST_ING_B;
3270 case HNAE3_FLR_RESET:
3273 dev_err(&hdev->pdev->dev,
3274 "Wait for unsupported reset type: %d\n",
3279 if (hdev->reset_type == HNAE3_FLR_RESET) {
3280 while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
3281 cnt++ < HCLGE_RESET_WAIT_CNT)
3282 msleep(HCLGE_RESET_WATI_MS);
3284 if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
3285 dev_err(&hdev->pdev->dev,
3286 "flr wait timeout: %u\n", cnt);
3293 val = hclge_read_dev(&hdev->hw, reg);
3294 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3295 msleep(HCLGE_RESET_WATI_MS);
3296 val = hclge_read_dev(&hdev->hw, reg);
3300 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3301 dev_warn(&hdev->pdev->dev,
3302 "Wait for reset timeout: %d\n", hdev->reset_type);
3309 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3311 struct hclge_vf_rst_cmd *req;
3312 struct hclge_desc desc;
3314 req = (struct hclge_vf_rst_cmd *)desc.data;
3315 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3316 req->dest_vfid = func_id;
3321 return hclge_cmd_send(&hdev->hw, &desc, 1);
3324 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3328 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
3329 struct hclge_vport *vport = &hdev->vport[i];
3332 /* Send cmd to set/clear VF's FUNC_RST_ING */
3333 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3335 dev_err(&hdev->pdev->dev,
3336 "set vf(%u) rst failed %d!\n",
3337 vport->vport_id, ret);
3341 if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3344 /* Inform VF to process the reset.
3345 * hclge_inform_reset_assert_to_vf may fail if VF
3346 * driver is not loaded.
3348 ret = hclge_inform_reset_assert_to_vf(vport);
3350 dev_warn(&hdev->pdev->dev,
3351 "inform reset to vf(%u) failed %d!\n",
3352 vport->vport_id, ret);
3358 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3360 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3361 test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
3362 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3365 hclge_mbx_handler(hdev);
3367 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3370 static int hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3372 struct hclge_pf_rst_sync_cmd *req;
3373 struct hclge_desc desc;
3377 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3378 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3381 /* VF needs to down its netdev by mbx during PF or FLR reset */
3382 hclge_mailbox_service_task(hdev);
3384 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3385 /* for compatibility with old firmware, wait
3386 * 100 ms for the VF to stop IO
3388 if (ret == -EOPNOTSUPP) {
3389 msleep(HCLGE_RESET_SYNC_TIME);
3392 dev_err(&hdev->pdev->dev, "sync with VF fail %d!\n",
3395 } else if (req->all_vf_ready) {
3398 msleep(HCLGE_PF_RESET_SYNC_TIME);
3399 hclge_cmd_reuse_desc(&desc, true);
3400 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
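/* reaching here means the VFs never reported ready: the loop above
 * retried every HCLGE_PF_RESET_SYNC_TIME ms, servicing the mailbox on
 * each pass, for up to HCLGE_PF_RESET_SYNC_CNT attempts
 */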
3402 dev_err(&hdev->pdev->dev, "sync with VF timeout!\n");
3406 void hclge_report_hw_error(struct hclge_dev *hdev,
3407 enum hnae3_hw_error_type type)
3409 struct hnae3_client *client = hdev->nic_client;
3412 if (!client || !client->ops->process_hw_error ||
3413 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3416 for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
3417 client->ops->process_hw_error(&hdev->vport[i].nic, type);
3420 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3424 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3425 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3426 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3427 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3428 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3431 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3432 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3433 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3434 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3438 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3440 struct hclge_desc desc;
3441 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3444 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3445 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3446 req->fun_reset_vfid = func_id;
3448 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3450 dev_err(&hdev->pdev->dev,
3451 "send function reset cmd fail, status =%d\n", ret);
3456 static void hclge_do_reset(struct hclge_dev *hdev)
3458 struct hnae3_handle *handle = &hdev->vport[0].nic;
3459 struct pci_dev *pdev = hdev->pdev;
3462 if (hclge_get_hw_reset_stat(handle)) {
3463 dev_info(&pdev->dev, "Hardware reset not finish\n");
3464 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3465 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3466 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3470 switch (hdev->reset_type) {
3471 case HNAE3_GLOBAL_RESET:
3472 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3473 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3474 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3475 dev_info(&pdev->dev, "Global Reset requested\n");
3477 case HNAE3_FUNC_RESET:
3478 dev_info(&pdev->dev, "PF Reset requested\n");
3479 /* schedule again to check later */
3480 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3481 hclge_reset_task_schedule(hdev);
3483 case HNAE3_FLR_RESET:
3484 dev_info(&pdev->dev, "FLR requested\n");
3485 /* schedule again to check later */
3486 set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
3487 hclge_reset_task_schedule(hdev);
3490 dev_warn(&pdev->dev,
3491 "Unsupported reset type: %d\n", hdev->reset_type);
3496 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3497 unsigned long *addr)
3499 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3500 struct hclge_dev *hdev = ae_dev->priv;
3502 /* first, resolve any unknown reset type to the known type(s) */
3503 if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
3504 /* we will intentionally ignore any errors from this function
3505 * as we will end up in *some* reset request in any case
3507 hclge_handle_hw_msix_error(hdev, addr);
3508 clear_bit(HNAE3_UNKNOWN_RESET, addr);
3509 /* We deferred the clearing of the error event which caused the
3510 * interrupt, since it was not possible to do that in
3511 * interrupt context (and this is the reason we introduced the
3512 * new UNKNOWN reset type). Now that the errors have been
3513 * handled and cleared in hardware, we can safely enable
3514 * interrupts. This is an exception to the norm.
3516 hclge_enable_vector(&hdev->misc_vector, true);
3519 /* return the highest priority reset level amongst all */
3520 if (test_bit(HNAE3_IMP_RESET, addr)) {
3521 rst_level = HNAE3_IMP_RESET;
3522 clear_bit(HNAE3_IMP_RESET, addr);
3523 clear_bit(HNAE3_GLOBAL_RESET, addr);
3524 clear_bit(HNAE3_FUNC_RESET, addr);
3525 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3526 rst_level = HNAE3_GLOBAL_RESET;
3527 clear_bit(HNAE3_GLOBAL_RESET, addr);
3528 clear_bit(HNAE3_FUNC_RESET, addr);
3529 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3530 rst_level = HNAE3_FUNC_RESET;
3531 clear_bit(HNAE3_FUNC_RESET, addr);
3532 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3533 rst_level = HNAE3_FLR_RESET;
3534 clear_bit(HNAE3_FLR_RESET, addr);
3537 if (hdev->reset_type != HNAE3_NONE_RESET &&
3538 rst_level < hdev->reset_type)
3539 return HNAE3_NONE_RESET;
3544 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3548 switch (hdev->reset_type) {
3549 case HNAE3_IMP_RESET:
3550 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3552 case HNAE3_GLOBAL_RESET:
3553 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3562 /* For revision 0x20, the reset interrupt source
3563 * can only be cleared after the hardware reset is done
3565 if (hdev->pdev->revision == 0x20)
3566 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3569 hclge_enable_vector(&hdev->misc_vector, true);
3572 static int hclge_reset_prepare_down(struct hclge_dev *hdev)
3576 switch (hdev->reset_type) {
3577 case HNAE3_FUNC_RESET:
3579 case HNAE3_FLR_RESET:
3580 ret = hclge_set_all_vf_rst(hdev, true);
3589 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3593 reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
3595 reg_val |= HCLGE_NIC_SW_RST_RDY;
3597 reg_val &= ~HCLGE_NIC_SW_RST_RDY;
3599 hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
3602 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3607 switch (hdev->reset_type) {
3608 case HNAE3_FUNC_RESET:
3609 /* to confirm whether all running VFs are ready
3610 * before requesting PF reset
3612 ret = hclge_func_reset_sync_vf(hdev);
3616 ret = hclge_func_reset_cmd(hdev, 0);
3618 dev_err(&hdev->pdev->dev,
3619 "asserting function reset fail %d!\n", ret);
3623 /* After performing PF reset, it is not necessary to do the
3624 * mailbox handling or send any command to firmware, because
3625 * any mailbox handling or command to firmware is only valid
3626 * after hclge_cmd_init is called.
3628 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3629 hdev->rst_stats.pf_rst_cnt++;
3631 case HNAE3_FLR_RESET:
3632 /* to confirm whether all running VFs are ready
3633 * before requesting PF reset
3635 ret = hclge_func_reset_sync_vf(hdev);
3639 set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
3640 set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
3641 hdev->rst_stats.flr_rst_cnt++;
3643 case HNAE3_IMP_RESET:
3644 hclge_handle_imp_error(hdev);
3645 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3646 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3647 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3653 /* inform hardware that preparatory work is done */
3654 msleep(HCLGE_RESET_SYNC_TIME);
3655 hclge_reset_handshake(hdev, true);
3656 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
3661 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
3663 #define MAX_RESET_FAIL_CNT 5
3665 if (hdev->reset_pending) {
3666 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
3667 hdev->reset_pending);
3669 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
3670 HCLGE_RESET_INT_M) {
3671 dev_info(&hdev->pdev->dev,
3672 "reset failed because new reset interrupt\n");
3673 hclge_clear_reset_cause(hdev);
3675 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
3676 hdev->rst_stats.reset_fail_cnt++;
3677 set_bit(hdev->reset_type, &hdev->reset_pending);
3678 dev_info(&hdev->pdev->dev,
3679 "re-schedule reset task(%u)\n",
3680 hdev->rst_stats.reset_fail_cnt);
3684 hclge_clear_reset_cause(hdev);
3686 /* recover the handshake status when reset fails */
3687 hclge_reset_handshake(hdev, true);
3689 dev_err(&hdev->pdev->dev, "Reset fail!\n");
3691 hclge_dbg_dump_rst_info(hdev);
3696 static int hclge_set_rst_done(struct hclge_dev *hdev)
3698 struct hclge_pf_rst_done_cmd *req;
3699 struct hclge_desc desc;
3702 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3703 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3704 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3706 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3707 /* To be compatible with the old firmware, which does not support
3708 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3711 if (ret == -EOPNOTSUPP) {
3712 dev_warn(&hdev->pdev->dev,
3713 "current firmware does not support command(0x%x)!\n",
3714 HCLGE_OPC_PF_RST_DONE);
3717 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3724 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
3728 switch (hdev->reset_type) {
3729 case HNAE3_FUNC_RESET:
3731 case HNAE3_FLR_RESET:
3732 ret = hclge_set_all_vf_rst(hdev, false);
3734 case HNAE3_GLOBAL_RESET:
3736 case HNAE3_IMP_RESET:
3737 ret = hclge_set_rst_done(hdev);
3743 /* clear up the handshake status after re-initialization is done */
3744 hclge_reset_handshake(hdev, false);
3749 static int hclge_reset_stack(struct hclge_dev *hdev)
3753 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
3757 ret = hclge_reset_ae_dev(hdev->ae_dev);
3761 ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
3765 return hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT);
3768 static void hclge_reset(struct hclge_dev *hdev)
3770 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3771 enum hnae3_reset_type reset_level;
3774 /* Initialize ae_dev reset status as well, in case enet layer wants to
3775 * know if device is undergoing reset
3777 ae_dev->reset_type = hdev->reset_type;
3778 hdev->rst_stats.reset_cnt++;
3779 /* perform reset of the stack & ae device for a client */
3780 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
3784 ret = hclge_reset_prepare_down(hdev);
3789 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
3791 goto err_reset_lock;
3795 ret = hclge_reset_prepare_wait(hdev);
3799 if (hclge_reset_wait(hdev))
3802 hdev->rst_stats.hw_reset_done_cnt++;
3804 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
3810 ret = hclge_reset_stack(hdev);
3812 goto err_reset_lock;
3814 hclge_clear_reset_cause(hdev);
3816 ret = hclge_reset_prepare_up(hdev);
3818 goto err_reset_lock;
3822 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
3823 /* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
3827 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
3832 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
3834 goto err_reset_lock;
3838 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
3842 hdev->last_reset_time = jiffies;
3843 hdev->rst_stats.reset_fail_cnt = 0;
3844 hdev->rst_stats.reset_done_cnt++;
3845 ae_dev->reset_type = HNAE3_NONE_RESET;
3847 /* if default_reset_request has a higher level reset request,
3848 * it should be handled as soon as possible, since some errors
3849 * need this kind of reset to be fixed.
3851 reset_level = hclge_get_reset_level(ae_dev,
3852 &hdev->default_reset_request);
3853 if (reset_level != HNAE3_NONE_RESET)
3854 set_bit(reset_level, &hdev->reset_request);
3861 if (hclge_reset_err_handle(hdev))
3862 hclge_reset_task_schedule(hdev);
3865 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
3867 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
3868 struct hclge_dev *hdev = ae_dev->priv;
3870 /* We might end up getting called broadly because of the 2 cases below:
3871 * 1. Recoverable error was conveyed through APEI and the only way to bring
3872 * normalcy is to reset.
3873 * 2. A new reset request from the stack due to timeout
3875 * For the first case, the error event might not have an ae handle available.
3876 * Check if this is a new reset request and we are not here just because the
3877 * last reset attempt did not succeed and the watchdog hit us again. We will
3878 * know this if the last reset request did not occur very recently (watchdog
3879 * timer = 5*HZ, let us check after a sufficiently large time, say 4*5*HZ).
3880 * In case of a new request we reset the "reset level" to PF reset.
3881 * And if it is a repeat reset request of the most recent one then we
3882 * want to make sure we throttle the reset request. Therefore, we will
3883 * not allow it again before 3*HZ has elapsed.
3886 handle = &hdev->vport[0].nic;
3888 if (time_before(jiffies, (hdev->last_reset_time +
3889 HCLGE_RESET_INTERVAL))) {
3890 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
3892 } else if (hdev->default_reset_request) {
3894 hclge_get_reset_level(ae_dev,
3895 &hdev->default_reset_request);
3896 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
3897 hdev->reset_level = HNAE3_FUNC_RESET;
3900 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
3903 /* request reset & schedule reset task */
3904 set_bit(hdev->reset_level, &hdev->reset_request);
3905 hclge_reset_task_schedule(hdev);
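/* escalate the level for the next request: repeated timeouts step a
 * PF level reset up towards a global reset
 */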
3907 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
3908 hdev->reset_level++;
3911 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
3912 enum hnae3_reset_type rst_type)
3914 struct hclge_dev *hdev = ae_dev->priv;
3916 set_bit(rst_type, &hdev->default_reset_request);
3919 static void hclge_reset_timer(struct timer_list *t)
3921 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
3923 /* if default_reset_request has no value, it means that this reset
3924 * request has already been handled, so just return here
3926 if (!hdev->default_reset_request)
3929 dev_info(&hdev->pdev->dev,
3930 "triggering reset in reset timer\n");
3931 hclge_reset_event(hdev->pdev, NULL);
3934 static void hclge_reset_subtask(struct hclge_dev *hdev)
3936 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
3938 /* check if there is any ongoing reset in the hardware. This status can
3939 * be checked from reset_pending. If there is, then we need to wait for
3940 * the hardware to complete the reset.
3941 * a. If we are able to figure out in reasonable time that the hardware
3942 * has been fully reset, then we can proceed with the driver and client
3944 * b. else, we can come back later to check this status so re-sched
3947 hdev->last_reset_time = jiffies;
3948 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
3949 if (hdev->reset_type != HNAE3_NONE_RESET)
3952 /* check if we got any *new* reset requests to be honored */
3953 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
3954 if (hdev->reset_type != HNAE3_NONE_RESET)
3955 hclge_do_reset(hdev);
3957 hdev->reset_type = HNAE3_NONE_RESET;
3960 static void hclge_reset_service_task(struct hclge_dev *hdev)
3962 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
3965 if (test_and_set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
3968 hclge_reset_subtask(hdev);
3970 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
3973 static void hclge_update_vport_alive(struct hclge_dev *hdev)
3977 /* start from vport 1 because the PF is always alive */
3978 for (i = 1; i < hdev->num_alloc_vport; i++) {
3979 struct hclge_vport *vport = &hdev->vport[i];
3981 if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
3982 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
3984 /* If the VF is not alive, set mps to the default value */
3985 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3986 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
3990 static void hclge_periodic_service_task(struct hclge_dev *hdev)
3992 unsigned long delta = round_jiffies_relative(HZ);
3994 /* Always handle the link updating to make sure link state is
3995 * updated when it is triggered by mbx.
3997 hclge_update_link_status(hdev);
3999 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4000 delta = jiffies - hdev->last_serv_processed;
4002 if (delta < round_jiffies_relative(HZ)) {
4003 delta = round_jiffies_relative(HZ) - delta;
4008 hdev->serv_processed_cnt++;
4009 hclge_update_vport_alive(hdev);
4011 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4012 hdev->last_serv_processed = jiffies;
4016 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4017 hclge_update_stats_for_all(hdev);
4019 hclge_update_port_info(hdev);
4020 hclge_sync_vlan_filter(hdev);
4022 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4023 hclge_rfs_filter_expire(hdev);
4025 hdev->last_serv_processed = jiffies;
4028 hclge_task_schedule(hdev, delta);
4031 static void hclge_service_task(struct work_struct *work)
4033 struct hclge_dev *hdev =
4034 container_of(work, struct hclge_dev, service_task.work);
4036 hclge_reset_service_task(hdev);
4037 hclge_mailbox_service_task(hdev);
4038 hclge_periodic_service_task(hdev);
4040 /* Handle reset and mbx again in case the periodic task delays the
4041 * handling by calling hclge_task_schedule() in
4042 * hclge_periodic_service_task().
4044 hclge_reset_service_task(hdev);
4045 hclge_mailbox_service_task(hdev);
4048 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4050 /* VF handle has no client */
4051 if (!handle->client)
4052 return container_of(handle, struct hclge_vport, nic);
4053 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4054 return container_of(handle, struct hclge_vport, roce);
4056 return container_of(handle, struct hclge_vport, nic);
4059 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4060 struct hnae3_vector_info *vector_info)
4062 struct hclge_vport *vport = hclge_get_vport(handle);
4063 struct hnae3_vector_info *vector = vector_info;
4064 struct hclge_dev *hdev = vport->back;
4068 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4069 vector_num = min(hdev->num_msi_left, vector_num);
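/* vector 0 is reserved for the misc interrupt (claimed in
 * hclge_get_misc_vector()), so the search below starts from index 1
 */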
4071 for (j = 0; j < vector_num; j++) {
4072 for (i = 1; i < hdev->num_msi; i++) {
4073 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4074 vector->vector = pci_irq_vector(hdev->pdev, i);
4075 vector->io_addr = hdev->hw.io_base +
4076 HCLGE_VECTOR_REG_BASE +
4077 (i - 1) * HCLGE_VECTOR_REG_OFFSET +
4079 HCLGE_VECTOR_VF_OFFSET;
4080 hdev->vector_status[i] = vport->vport_id;
4081 hdev->vector_irq[i] = vector->vector;
4090 hdev->num_msi_left -= alloc;
4091 hdev->num_msi_used += alloc;
4096 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4100 for (i = 0; i < hdev->num_msi; i++)
4101 if (vector == hdev->vector_irq[i])
4107 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4109 struct hclge_vport *vport = hclge_get_vport(handle);
4110 struct hclge_dev *hdev = vport->back;
4113 vector_id = hclge_get_vector_index(hdev, vector);
4114 if (vector_id < 0) {
4115 dev_err(&hdev->pdev->dev,
4116 "Get vector index fail. vector_id =%d\n", vector_id);
4120 hclge_free_vector(hdev, vector_id);
4125 static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
4127 return HCLGE_RSS_KEY_SIZE;
4130 static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
4132 return HCLGE_RSS_IND_TBL_SIZE;
4135 static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
4136 const u8 hfunc, const u8 *key)
4138 struct hclge_rss_config_cmd *req;
4139 unsigned int key_offset = 0;
4140 struct hclge_desc desc;
4145 key_counts = HCLGE_RSS_KEY_SIZE;
4146 req = (struct hclge_rss_config_cmd *)desc.data;
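/* the key is programmed in fragments: each descriptor carries the hash
 * algorithm plus up to HCLGE_RSS_HASH_KEY_NUM bytes of key starting at
 * key_offset. For example, assuming a 40 byte key and 16 bytes per
 * descriptor (both sizes are defined elsewhere in the driver), three
 * commands would carry 16 + 16 + 8 bytes.
 */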
4148 while (key_counts) {
4149 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
4152 req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
4153 req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
4155 key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
4156 memcpy(req->hash_key,
4157 key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
4159 key_counts -= key_size;
4161 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4163 dev_err(&hdev->pdev->dev,
4164 "Configure RSS config fail, status = %d\n",
4172 static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
4174 struct hclge_rss_indirection_table_cmd *req;
4175 struct hclge_desc desc;
4179 req = (struct hclge_rss_indirection_table_cmd *)desc.data;
4181 for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
4182 hclge_cmd_setup_basic_desc
4183 (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
4185 req->start_table_index =
4186 cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
4187 req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
4189 for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
4190 req->rss_result[j] =
4191 indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
4193 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4195 dev_err(&hdev->pdev->dev,
4196 "Configure rss indir table fail,status = %d\n",
4204 static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
4205 u16 *tc_size, u16 *tc_offset)
4207 struct hclge_rss_tc_mode_cmd *req;
4208 struct hclge_desc desc;
4212 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
4213 req = (struct hclge_rss_tc_mode_cmd *)desc.data;
4215 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4218 hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
4219 hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
4220 HCLGE_RSS_TC_SIZE_S, tc_size[i]);
4221 hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
4222 HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
4224 req->rss_tc_mode[i] = cpu_to_le16(mode);
4227 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4229 dev_err(&hdev->pdev->dev,
4230 "Configure rss tc mode fail, status = %d\n", ret);
4235 static void hclge_get_rss_type(struct hclge_vport *vport)
4237 if (vport->rss_tuple_sets.ipv4_tcp_en ||
4238 vport->rss_tuple_sets.ipv4_udp_en ||
4239 vport->rss_tuple_sets.ipv4_sctp_en ||
4240 vport->rss_tuple_sets.ipv6_tcp_en ||
4241 vport->rss_tuple_sets.ipv6_udp_en ||
4242 vport->rss_tuple_sets.ipv6_sctp_en)
4243 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
4244 else if (vport->rss_tuple_sets.ipv4_fragment_en ||
4245 vport->rss_tuple_sets.ipv6_fragment_en)
4246 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
4248 vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
4251 static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
4253 struct hclge_rss_input_tuple_cmd *req;
4254 struct hclge_desc desc;
4257 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4259 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4261 /* Get the tuple cfg from pf */
4262 req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
4263 req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
4264 req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
4265 req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
4266 req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
4267 req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
4268 req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
4269 req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
4270 hclge_get_rss_type(&hdev->vport[0]);
4271 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4273 dev_err(&hdev->pdev->dev,
4274 "Configure rss input fail, status = %d\n", ret);
4278 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4281 struct hclge_vport *vport = hclge_get_vport(handle);
4284 /* Get hash algorithm */
4286 switch (vport->rss_algo) {
4287 case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
4288 *hfunc = ETH_RSS_HASH_TOP;
4290 case HCLGE_RSS_HASH_ALGO_SIMPLE:
4291 *hfunc = ETH_RSS_HASH_XOR;
4294 *hfunc = ETH_RSS_HASH_UNKNOWN;
4299 /* Get the RSS Key required by the user */
4301 memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
4303 /* Get indirect table */
4305 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4306 indir[i] = vport->rss_indirection_tbl[i];
4311 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4312 const u8 *key, const u8 hfunc)
4314 struct hclge_vport *vport = hclge_get_vport(handle);
4315 struct hclge_dev *hdev = vport->back;
4319 /* Set the RSS Hash Key if specified by the user */
4322 case ETH_RSS_HASH_TOP:
4323 hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4325 case ETH_RSS_HASH_XOR:
4326 hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4328 case ETH_RSS_HASH_NO_CHANGE:
4329 hash_algo = vport->rss_algo;
4335 ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
4339 /* Update the shadow RSS key with the user specified key */
4340 memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
4341 vport->rss_algo = hash_algo;
4344 /* Update the shadow RSS table with user specified qids */
4345 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4346 vport->rss_indirection_tbl[i] = indir[i];
4348 /* Update the hardware */
4349 return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
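/* the helper below translates the ethtool RXH_* bits in nfc->data into
 * hardware tuple bits, e.g. RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3 becomes HCLGE_S_IP_BIT | HCLGE_D_IP_BIT |
 * HCLGE_S_PORT_BIT | HCLGE_D_PORT_BIT, with HCLGE_V_TAG_BIT added for
 * SCTP flows
 */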
4352 static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
4354 u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
4356 if (nfc->data & RXH_L4_B_2_3)
4357 hash_sets |= HCLGE_D_PORT_BIT;
4359 hash_sets &= ~HCLGE_D_PORT_BIT;
4361 if (nfc->data & RXH_IP_SRC)
4362 hash_sets |= HCLGE_S_IP_BIT;
4364 hash_sets &= ~HCLGE_S_IP_BIT;
4366 if (nfc->data & RXH_IP_DST)
4367 hash_sets |= HCLGE_D_IP_BIT;
4369 hash_sets &= ~HCLGE_D_IP_BIT;
4371 if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
4372 hash_sets |= HCLGE_V_TAG_BIT;
4377 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4378 struct ethtool_rxnfc *nfc)
4380 struct hclge_vport *vport = hclge_get_vport(handle);
4381 struct hclge_dev *hdev = vport->back;
4382 struct hclge_rss_input_tuple_cmd *req;
4383 struct hclge_desc desc;
4387 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
4388 RXH_L4_B_0_1 | RXH_L4_B_2_3))
4391 req = (struct hclge_rss_input_tuple_cmd *)desc.data;
4392 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
4394 req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
4395 req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
4396 req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
4397 req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
4398 req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
4399 req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
4400 req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
4401 req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
4403 tuple_sets = hclge_get_rss_hash_bits(nfc);
4404 switch (nfc->flow_type) {
4406 req->ipv4_tcp_en = tuple_sets;
4409 req->ipv6_tcp_en = tuple_sets;
4412 req->ipv4_udp_en = tuple_sets;
4415 req->ipv6_udp_en = tuple_sets;
4418 req->ipv4_sctp_en = tuple_sets;
4421 if ((nfc->data & RXH_L4_B_0_1) ||
4422 (nfc->data & RXH_L4_B_2_3))
4425 req->ipv6_sctp_en = tuple_sets;
4428 req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4431 req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
4437 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4439 dev_err(&hdev->pdev->dev,
4440 "Set rss tuple fail, status = %d\n", ret);
4444 vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
4445 vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
4446 vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
4447 vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
4448 vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
4449 vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
4450 vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
4451 vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
4452 hclge_get_rss_type(vport);
4456 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4457 struct ethtool_rxnfc *nfc)
4459 struct hclge_vport *vport = hclge_get_vport(handle);
4464 switch (nfc->flow_type) {
4466 tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
4469 tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
4472 tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
4475 tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
4478 tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
4481 tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
4485 tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
4494 if (tuple_sets & HCLGE_D_PORT_BIT)
4495 nfc->data |= RXH_L4_B_2_3;
4496 if (tuple_sets & HCLGE_S_PORT_BIT)
4497 nfc->data |= RXH_L4_B_0_1;
4498 if (tuple_sets & HCLGE_D_IP_BIT)
4499 nfc->data |= RXH_IP_DST;
4500 if (tuple_sets & HCLGE_S_IP_BIT)
4501 nfc->data |= RXH_IP_SRC;
4506 static int hclge_get_tc_size(struct hnae3_handle *handle)
4508 struct hclge_vport *vport = hclge_get_vport(handle);
4509 struct hclge_dev *hdev = vport->back;
4511 return hdev->rss_size_max;
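/* Program the complete RSS configuration of vport 0 into the hardware:
 * indirection table, hash algorithm and key, input tuples, and finally the
 * per-TC mode (offset and log2 size derived from alloc_rss_size).
 */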
4514 int hclge_rss_init_hw(struct hclge_dev *hdev)
4516 struct hclge_vport *vport = hdev->vport;
4517 u8 *rss_indir = vport[0].rss_indirection_tbl;
4518 u16 rss_size = vport[0].alloc_rss_size;
4519 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4520 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4521 u8 *key = vport[0].rss_hash_key;
4522 u8 hfunc = vport[0].rss_algo;
4523 u16 tc_valid[HCLGE_MAX_TC_NUM];
4528 ret = hclge_set_rss_indir_table(hdev, rss_indir);
4532 ret = hclge_set_rss_algo_key(hdev, hfunc, key);
4536 ret = hclge_set_rss_input_tuple(hdev);
4540 /* Each TC has the same queue size, and the tc_size set to hardware is
4541 * the log2 of the roundup power of two of rss_size; the actual queue
4542 * size is limited by the indirection table.
4544 if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
4545 dev_err(&hdev->pdev->dev,
4546 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4551 roundup_size = roundup_pow_of_two(rss_size);
4552 roundup_size = ilog2(roundup_size);
4554 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4557 if (!(hdev->hw_tc_map & BIT(i)))
4561 tc_size[i] = roundup_size;
4562 tc_offset[i] = rss_size * i;
4565 return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
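/* Build the default indirection table for every vport: entry i simply maps
 * to queue (i % alloc_rss_size), spreading flows evenly over the allocated
 * RSS queues.
 */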
4568 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
4570 struct hclge_vport *vport = hdev->vport;
4573 for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
4574 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
4575 vport[j].rss_indirection_tbl[i] =
4576 i % vport[j].alloc_rss_size;
4580 static void hclge_rss_init_cfg(struct hclge_dev *hdev)
4582 int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
4583 struct hclge_vport *vport = hdev->vport;
4585 if (hdev->pdev->revision >= 0x21)
4586 rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
4588 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
4589 vport[i].rss_tuple_sets.ipv4_tcp_en =
4590 HCLGE_RSS_INPUT_TUPLE_OTHER;
4591 vport[i].rss_tuple_sets.ipv4_udp_en =
4592 HCLGE_RSS_INPUT_TUPLE_OTHER;
4593 vport[i].rss_tuple_sets.ipv4_sctp_en =
4594 HCLGE_RSS_INPUT_TUPLE_SCTP;
4595 vport[i].rss_tuple_sets.ipv4_fragment_en =
4596 HCLGE_RSS_INPUT_TUPLE_OTHER;
4597 vport[i].rss_tuple_sets.ipv6_tcp_en =
4598 HCLGE_RSS_INPUT_TUPLE_OTHER;
4599 vport[i].rss_tuple_sets.ipv6_udp_en =
4600 HCLGE_RSS_INPUT_TUPLE_OTHER;
4601 vport[i].rss_tuple_sets.ipv6_sctp_en =
4602 HCLGE_RSS_INPUT_TUPLE_SCTP;
4603 vport[i].rss_tuple_sets.ipv6_fragment_en =
4604 HCLGE_RSS_INPUT_TUPLE_OTHER;
4606 vport[i].rss_algo = rss_algo;
4608 memcpy(vport[i].rss_hash_key, hclge_hash_key,
4609 HCLGE_RSS_KEY_SIZE);
4612 hclge_rss_indir_init_cfg(hdev);
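/* Map (or unmap, when @en is false) a chain of TX/RX rings onto one vector.
 * Up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries are packed into a single
 * command descriptor; a full descriptor is sent immediately and a new one is
 * started for the remaining nodes, with any leftover entries flushed after
 * the end of the chain.
 */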
4615 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4616 int vector_id, bool en,
4617 struct hnae3_ring_chain_node *ring_chain)
4619 struct hclge_dev *hdev = vport->back;
4620 struct hnae3_ring_chain_node *node;
4621 struct hclge_desc desc;
4622 struct hclge_ctrl_vector_chain_cmd *req =
4623 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4624 enum hclge_cmd_status status;
4625 enum hclge_opcode_type op;
4626 u16 tqp_type_and_id;
4629 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4630 hclge_cmd_setup_basic_desc(&desc, op, false);
4631 req->int_vector_id = vector_id;
4634 for (node = ring_chain; node; node = node->next) {
4635 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4636 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4638 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4639 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4640 HCLGE_TQP_ID_S, node->tqp_index);
4641 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4643 hnae3_get_field(node->int_gl_idx,
4644 HNAE3_RING_GL_IDX_M,
4645 HNAE3_RING_GL_IDX_S));
4646 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4647 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4648 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4649 req->vfid = vport->vport_id;
4651 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4653 dev_err(&hdev->pdev->dev,
4654 "Map TQP fail, status is %d.\n",
4660 hclge_cmd_setup_basic_desc(&desc,
4663 req->int_vector_id = vector_id;
4668 req->int_cause_num = i;
4669 req->vfid = vport->vport_id;
4670 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4672 dev_err(&hdev->pdev->dev,
4673 "Map TQP fail, status is %d.\n", status);
4681 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4682 struct hnae3_ring_chain_node *ring_chain)
4684 struct hclge_vport *vport = hclge_get_vport(handle);
4685 struct hclge_dev *hdev = vport->back;
4688 vector_id = hclge_get_vector_index(hdev, vector);
4689 if (vector_id < 0) {
4690 dev_err(&hdev->pdev->dev,
4691 "Get vector index fail. vector_id =%d\n", vector_id);
4695 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4698 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4699 struct hnae3_ring_chain_node *ring_chain)
4701 struct hclge_vport *vport = hclge_get_vport(handle);
4702 struct hclge_dev *hdev = vport->back;
4705 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4708 vector_id = hclge_get_vector_index(hdev, vector);
4709 if (vector_id < 0) {
4710 dev_err(&handle->pdev->dev,
4711 "Get vector index fail. ret =%d\n", vector_id);
4715 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4717 dev_err(&handle->pdev->dev,
4718 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4724 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
4725 struct hclge_promisc_param *param)
4727 struct hclge_promisc_cfg_cmd *req;
4728 struct hclge_desc desc;
4731 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4733 req = (struct hclge_promisc_cfg_cmd *)desc.data;
4734 req->vf_id = param->vf_id;
4736 /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
4737 * pdev revision(0x20); newer revisions support them. Setting these
4738 * two fields does not return an error when the driver sends the
4739 * command to the firmware on revision(0x20).
4741 req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
4742 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
4744 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4746 dev_err(&hdev->pdev->dev,
4747 "Set promisc mode fail, status is %d.\n", ret);
4752 static void hclge_promisc_param_init(struct hclge_promisc_param *param,
4753 bool en_uc, bool en_mc, bool en_bc,
4759 memset(param, 0, sizeof(struct hclge_promisc_param));
4761 param->enable = HCLGE_PROMISC_EN_UC;
4763 param->enable |= HCLGE_PROMISC_EN_MC;
4765 param->enable |= HCLGE_PROMISC_EN_BC;
4766 param->vf_id = vport_id;
4769 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4770 bool en_mc_pmc, bool en_bc_pmc)
4772 struct hclge_dev *hdev = vport->back;
4773 struct hclge_promisc_param param;
4775 hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
4777 return hclge_cmd_set_promisc_mode(hdev, &param);
4780 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4783 struct hclge_vport *vport = hclge_get_vport(handle);
4784 bool en_bc_pmc = true;
4786 /* For revision 0x20, if broadcast promisc is enabled, the vlan filter is
4787 * always bypassed. So broadcast promisc should be disabled until the
4788 * user enables promisc mode
4790 if (handle->pdev->revision == 0x20)
4791 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4793 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4797 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
4799 struct hclge_get_fd_mode_cmd *req;
4800 struct hclge_desc desc;
4803 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
4805 req = (struct hclge_get_fd_mode_cmd *)desc.data;
4807 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4809 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
4813 *fd_mode = req->mode;
4818 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
4819 u32 *stage1_entry_num,
4820 u32 *stage2_entry_num,
4821 u16 *stage1_counter_num,
4822 u16 *stage2_counter_num)
4824 struct hclge_get_fd_allocation_cmd *req;
4825 struct hclge_desc desc;
4828 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
4830 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
4832 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4834 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
4839 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
4840 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
4841 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
4842 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
4847 static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
4849 struct hclge_set_fd_key_config_cmd *req;
4850 struct hclge_fd_key_cfg *stage;
4851 struct hclge_desc desc;
4854 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
4856 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
4857 stage = &hdev->fd_cfg.key_cfg[stage_num];
4858 req->stage = stage_num;
4859 req->key_select = stage->key_sel;
4860 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
4861 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
4862 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
4863 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
4864 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
4865 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
4867 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4869 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
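/* Initialise the flow director: query the TCAM mode to derive the maximum
 * key length, declare the supported ethtool flow types, select which inner
 * tuples and meta data fields take part in the stage 1 key, query the
 * rule/counter allocation and finally program the stage 1 key configuration.
 */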
4874 static int hclge_init_fd_config(struct hclge_dev *hdev)
4876 #define LOW_2_WORDS 0x03
4877 struct hclge_fd_key_cfg *key_cfg;
4880 if (!hnae3_dev_fd_supported(hdev))
4883 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
4887 switch (hdev->fd_cfg.fd_mode) {
4888 case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
4889 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
4891 case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
4892 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
4895 dev_err(&hdev->pdev->dev,
4896 "Unsupported flow director mode %u\n",
4897 hdev->fd_cfg.fd_mode);
4901 hdev->fd_cfg.proto_support =
4902 TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
4903 UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
4904 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
4905 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
4906 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
4907 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
4908 key_cfg->outer_sipv6_word_en = 0;
4909 key_cfg->outer_dipv6_word_en = 0;
4911 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
4912 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
4913 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
4914 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
4916 /* If the max 400-bit key is used, we can also support ether type tuples */
4917 if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
4918 hdev->fd_cfg.proto_support |= ETHER_FLOW;
4919 key_cfg->tuple_active |=
4920 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
4923 /* roce_type is used to filter roce frames
4924 * dst_vport is used to specify the rule
4926 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
4928 ret = hclge_get_fd_allocation(hdev,
4929 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
4930 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
4931 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
4932 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
4936 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
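/* Write one half (X or Y, selected by @sel_x) of a TCAM entry at @loc. The
 * key is split across the tcam_data areas of three chained descriptors, and
 * the entry is only marked valid on the X write of an add operation.
 */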
4939 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
4940 int loc, u8 *key, bool is_add)
4942 struct hclge_fd_tcam_config_1_cmd *req1;
4943 struct hclge_fd_tcam_config_2_cmd *req2;
4944 struct hclge_fd_tcam_config_3_cmd *req3;
4945 struct hclge_desc desc[3];
4948 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
4949 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4950 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
4951 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
4952 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
4954 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
4955 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
4956 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
4958 req1->stage = stage;
4959 req1->xy_sel = sel_x ? 1 : 0;
4960 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
4961 req1->index = cpu_to_le32(loc);
4962 req1->entry_vld = sel_x ? is_add : 0;
4965 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
4966 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
4967 sizeof(req2->tcam_data));
4968 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
4969 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
4972 ret = hclge_cmd_send(&hdev->hw, desc, 3);
4974 dev_err(&hdev->pdev->dev,
4975 "config tcam key fail, ret=%d\n",
4981 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
4982 struct hclge_fd_ad_data *action)
4984 struct hclge_fd_ad_config_cmd *req;
4985 struct hclge_desc desc;
4989 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
4991 req = (struct hclge_fd_ad_config_cmd *)desc.data;
4992 req->index = cpu_to_le32(loc);
4995 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
4996 action->write_rule_id_to_bd);
4997 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5000 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5001 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5002 action->forward_to_direct_queue);
5003 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5005 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5006 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5007 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5008 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5009 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5010 action->counter_id);
5012 req->ad_data = cpu_to_le64(ad_data);
5013 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5015 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
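/* Convert one tuple of @rule into its TCAM X/Y representation. Tuples
 * flagged in rule->unused_tuple are skipped; for the others the
 * calc_x()/calc_y() helpers derive the two TCAM planes from the tuple value
 * and its mask, with MAC address bytes stored in reversed order.
 */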
5020 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5021 struct hclge_fd_rule *rule)
5023 u16 tmp_x_s, tmp_y_s;
5024 u32 tmp_x_l, tmp_y_l;
5027 if (rule->unused_tuple & tuple_bit)
5030 switch (tuple_bit) {
5033 case BIT(INNER_DST_MAC):
5034 for (i = 0; i < ETH_ALEN; i++) {
5035 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5036 rule->tuples_mask.dst_mac[i]);
5037 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
5038 rule->tuples_mask.dst_mac[i]);
5042 case BIT(INNER_SRC_MAC):
5043 for (i = 0; i < ETH_ALEN; i++) {
5044 calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5045 rule->tuples_mask.src_mac[i]);
5046 calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
5047 rule->tuples_mask.src_mac[i]);
5051 case BIT(INNER_VLAN_TAG_FST):
5052 calc_x(tmp_x_s, rule->tuples.vlan_tag1,
5053 rule->tuples_mask.vlan_tag1);
5054 calc_y(tmp_y_s, rule->tuples.vlan_tag1,
5055 rule->tuples_mask.vlan_tag1);
5056 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5057 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5060 case BIT(INNER_ETH_TYPE):
5061 calc_x(tmp_x_s, rule->tuples.ether_proto,
5062 rule->tuples_mask.ether_proto);
5063 calc_y(tmp_y_s, rule->tuples.ether_proto,
5064 rule->tuples_mask.ether_proto);
5065 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5066 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5069 case BIT(INNER_IP_TOS):
5070 calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5071 calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
5074 case BIT(INNER_IP_PROTO):
5075 calc_x(*key_x, rule->tuples.ip_proto,
5076 rule->tuples_mask.ip_proto);
5077 calc_y(*key_y, rule->tuples.ip_proto,
5078 rule->tuples_mask.ip_proto);
5081 case BIT(INNER_SRC_IP):
5082 calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
5083 rule->tuples_mask.src_ip[IPV4_INDEX]);
5084 calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
5085 rule->tuples_mask.src_ip[IPV4_INDEX]);
5086 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5087 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5090 case BIT(INNER_DST_IP):
5091 calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
5092 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5093 calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
5094 rule->tuples_mask.dst_ip[IPV4_INDEX]);
5095 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5096 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5099 case BIT(INNER_SRC_PORT):
5100 calc_x(tmp_x_s, rule->tuples.src_port,
5101 rule->tuples_mask.src_port);
5102 calc_y(tmp_y_s, rule->tuples.src_port,
5103 rule->tuples_mask.src_port);
5104 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5105 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5108 case BIT(INNER_DST_PORT):
5109 calc_x(tmp_x_s, rule->tuples.dst_port,
5110 rule->tuples_mask.dst_port);
5111 calc_y(tmp_y_s, rule->tuples.dst_port,
5112 rule->tuples_mask.dst_port);
5113 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5114 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
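/* Compose the port number field used in the meta data key and the switch
 * parameter command: for a host port it encodes the PF id and VF id, for a
 * network port it encodes the physical network port id, with the port type
 * bit distinguishing the two.
 */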
5122 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5123 u8 vf_id, u8 network_port_id)
5125 u32 port_number = 0;
5127 if (port_type == HOST_PORT) {
5128 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5130 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5132 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5134 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5135 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5136 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5142 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5143 __le32 *key_x, __le32 *key_y,
5144 struct hclge_fd_rule *rule)
5146 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5147 u8 cur_pos = 0, tuple_size, shift_bits;
5150 for (i = 0; i < MAX_META_DATA; i++) {
5151 tuple_size = meta_data_key_info[i].key_length;
5152 tuple_bit = key_cfg->meta_data_active & BIT(i);
5154 switch (tuple_bit) {
5155 case BIT(ROCE_TYPE):
5156 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5157 cur_pos += tuple_size;
5159 case BIT(DST_VPORT):
5160 port_number = hclge_get_port_number(HOST_PORT, 0,
5162 hnae3_set_field(meta_data,
5163 GENMASK(cur_pos + tuple_size - 1, cur_pos),
5164 cur_pos, port_number);
5165 cur_pos += tuple_size;
5172 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5173 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5174 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5176 *key_x = cpu_to_le32(tmp_x << shift_bits);
5177 *key_y = cpu_to_le32(tmp_y << shift_bits);
5180 /* A complete key is the combination of the meta data key and the tuple key.
5181 * The meta data key is stored in the MSB region, the tuple key is stored in
5182 * the LSB region, and unused bits are filled with 0.
5184 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5185 struct hclge_fd_rule *rule)
5187 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5188 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5189 u8 *cur_key_x, *cur_key_y;
5191 int ret, tuple_size;
5192 u8 meta_data_region;
5194 memset(key_x, 0, sizeof(key_x));
5195 memset(key_y, 0, sizeof(key_y));
5199 for (i = 0 ; i < MAX_TUPLE; i++) {
5203 tuple_size = tuple_key_info[i].key_length / 8;
5204 check_tuple = key_cfg->tuple_active & BIT(i);
5206 tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
5209 cur_key_x += tuple_size;
5210 cur_key_y += tuple_size;
5214 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5215 MAX_META_DATA_LENGTH / 8;
5217 hclge_fd_convert_meta_data(key_cfg,
5218 (__le32 *)(key_x + meta_data_region),
5219 (__le32 *)(key_y + meta_data_region),
5222 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5225 dev_err(&hdev->pdev->dev,
5226 "fd key_y config fail, loc=%u, ret=%d\n",
5227 rule->location, ret);
5231 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5234 dev_err(&hdev->pdev->dev,
5235 "fd key_x config fail, loc=%u, ret=%d\n",
5236 rule->location, ret);
5240 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5241 struct hclge_fd_rule *rule)
5243 struct hclge_fd_ad_data ad_data;
5245 ad_data.ad_id = rule->location;
5247 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5248 ad_data.drop_packet = true;
5249 ad_data.forward_to_direct_queue = false;
5250 ad_data.queue_id = 0;
5252 ad_data.drop_packet = false;
5253 ad_data.forward_to_direct_queue = true;
5254 ad_data.queue_id = rule->queue_id;
5257 ad_data.use_counter = false;
5258 ad_data.counter_id = 0;
5260 ad_data.use_next_stage = false;
5261 ad_data.next_input_key = 0;
5263 ad_data.write_rule_id_to_bd = true;
5264 ad_data.rule_id = rule->location;
5266 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
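/* Validate an ethtool flow spec before it is turned into a rule: check the
 * location and flow type against the stage 1 allocation, reject unsupported
 * user-defined fields, and record every field the user left as a wildcard in
 * the @unused tuple bitmap so it can be masked out of the TCAM key later.
 */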
5269 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5270 struct ethtool_rx_flow_spec *fs, u32 *unused)
5272 struct ethtool_tcpip4_spec *tcp_ip4_spec;
5273 struct ethtool_usrip4_spec *usr_ip4_spec;
5274 struct ethtool_tcpip6_spec *tcp_ip6_spec;
5275 struct ethtool_usrip6_spec *usr_ip6_spec;
5276 struct ethhdr *ether_spec;
5278 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5281 if (!(fs->flow_type & hdev->fd_cfg.proto_support))
5284 if ((fs->flow_type & FLOW_EXT) &&
5285 (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
5286 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5290 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5294 tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
5295 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5297 if (!tcp_ip4_spec->ip4src)
5298 *unused |= BIT(INNER_SRC_IP);
5300 if (!tcp_ip4_spec->ip4dst)
5301 *unused |= BIT(INNER_DST_IP);
5303 if (!tcp_ip4_spec->psrc)
5304 *unused |= BIT(INNER_SRC_PORT);
5306 if (!tcp_ip4_spec->pdst)
5307 *unused |= BIT(INNER_DST_PORT);
5309 if (!tcp_ip4_spec->tos)
5310 *unused |= BIT(INNER_IP_TOS);
5314 usr_ip4_spec = &fs->h_u.usr_ip4_spec;
5315 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5316 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5318 if (!usr_ip4_spec->ip4src)
5319 *unused |= BIT(INNER_SRC_IP);
5321 if (!usr_ip4_spec->ip4dst)
5322 *unused |= BIT(INNER_DST_IP);
5324 if (!usr_ip4_spec->tos)
5325 *unused |= BIT(INNER_IP_TOS);
5327 if (!usr_ip4_spec->proto)
5328 *unused |= BIT(INNER_IP_PROTO);
5330 if (usr_ip4_spec->l4_4_bytes)
5333 if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
5340 tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
5341 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5344 /* check whether the src/dst ip address is used */
5345 if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
5346 !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
5347 *unused |= BIT(INNER_SRC_IP);
5349 if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
5350 !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
5351 *unused |= BIT(INNER_DST_IP);
5353 if (!tcp_ip6_spec->psrc)
5354 *unused |= BIT(INNER_SRC_PORT);
5356 if (!tcp_ip6_spec->pdst)
5357 *unused |= BIT(INNER_DST_PORT);
5359 if (tcp_ip6_spec->tclass)
5363 case IPV6_USER_FLOW:
5364 usr_ip6_spec = &fs->h_u.usr_ip6_spec;
5365 *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5366 BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
5367 BIT(INNER_DST_PORT);
5369 /* check whether the src/dst ip address is used */
5370 if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
5371 !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
5372 *unused |= BIT(INNER_SRC_IP);
5374 if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
5375 !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
5376 *unused |= BIT(INNER_DST_IP);
5378 if (!usr_ip6_spec->l4_proto)
5379 *unused |= BIT(INNER_IP_PROTO);
5381 if (usr_ip6_spec->tclass)
5384 if (usr_ip6_spec->l4_4_bytes)
5389 ether_spec = &fs->h_u.ether_spec;
5390 *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5391 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5392 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5394 if (is_zero_ether_addr(ether_spec->h_source))
5395 *unused |= BIT(INNER_SRC_MAC);
5397 if (is_zero_ether_addr(ether_spec->h_dest))
5398 *unused |= BIT(INNER_DST_MAC);
5400 if (!ether_spec->h_proto)
5401 *unused |= BIT(INNER_ETH_TYPE);
5408 if ((fs->flow_type & FLOW_EXT)) {
5409 if (fs->h_ext.vlan_etype)
5411 if (!fs->h_ext.vlan_tci)
5412 *unused |= BIT(INNER_VLAN_TAG_FST);
5414 if (fs->m_ext.vlan_tci) {
5415 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
5419 *unused |= BIT(INNER_VLAN_TAG_FST);
5422 if (fs->flow_type & FLOW_MAC_EXT) {
5423 if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
5426 if (is_zero_ether_addr(fs->h_ext.h_dest))
5427 *unused |= BIT(INNER_DST_MAC);
5429 *unused &= ~(BIT(INNER_DST_MAC));
5435 static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
5437 struct hclge_fd_rule *rule = NULL;
5438 struct hlist_node *node2;
5440 spin_lock_bh(&hdev->fd_rule_lock);
5441 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5442 if (rule->location >= location)
5446 spin_unlock_bh(&hdev->fd_rule_lock);
5448 return rule && rule->location == location;
5451 /* the caller must hold fd_rule_lock */
5452 static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
5453 struct hclge_fd_rule *new_rule,
5457 struct hclge_fd_rule *rule = NULL, *parent = NULL;
5458 struct hlist_node *node2;
5460 if (is_add && !new_rule)
5463 hlist_for_each_entry_safe(rule, node2,
5464 &hdev->fd_rule_list, rule_node) {
5465 if (rule->location >= location)
5470 if (rule && rule->location == location) {
5471 hlist_del(&rule->rule_node);
5473 hdev->hclge_fd_rule_num--;
5476 if (!hdev->hclge_fd_rule_num)
5477 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5478 clear_bit(location, hdev->fd_bmap);
5482 } else if (!is_add) {
5483 dev_err(&hdev->pdev->dev,
5484 "delete fail, rule %u does not exist\n",
5489 INIT_HLIST_NODE(&new_rule->rule_node);
5492 hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
5494 hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
5496 set_bit(location, hdev->fd_bmap);
5497 hdev->hclge_fd_rule_num++;
5498 hdev->fd_active_type = new_rule->rule_type;
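/* Copy the match fields of an ethtool flow spec into @rule. Addresses and
 * ports are converted from big endian to CPU order, the ether type and, for
 * TCP/UDP/SCTP flows, the IP protocol are filled in implicitly, and the
 * optional FLOW_EXT/FLOW_MAC_EXT fields (VLAN tag, destination MAC) are
 * appended at the end.
 */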
5503 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
5504 struct ethtool_rx_flow_spec *fs,
5505 struct hclge_fd_rule *rule)
5507 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5509 switch (flow_type) {
5513 rule->tuples.src_ip[IPV4_INDEX] =
5514 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
5515 rule->tuples_mask.src_ip[IPV4_INDEX] =
5516 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
5518 rule->tuples.dst_ip[IPV4_INDEX] =
5519 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
5520 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5521 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
5523 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
5524 rule->tuples_mask.src_port =
5525 be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
5527 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
5528 rule->tuples_mask.dst_port =
5529 be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
5531 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
5532 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
5534 rule->tuples.ether_proto = ETH_P_IP;
5535 rule->tuples_mask.ether_proto = 0xFFFF;
5539 rule->tuples.src_ip[IPV4_INDEX] =
5540 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
5541 rule->tuples_mask.src_ip[IPV4_INDEX] =
5542 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
5544 rule->tuples.dst_ip[IPV4_INDEX] =
5545 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
5546 rule->tuples_mask.dst_ip[IPV4_INDEX] =
5547 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
5549 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
5550 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
5552 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
5553 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
5555 rule->tuples.ether_proto = ETH_P_IP;
5556 rule->tuples_mask.ether_proto = 0xFFFF;
5562 be32_to_cpu_array(rule->tuples.src_ip,
5563 fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5564 be32_to_cpu_array(rule->tuples_mask.src_ip,
5565 fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
5567 be32_to_cpu_array(rule->tuples.dst_ip,
5568 fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5569 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5570 fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
5572 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
5573 rule->tuples_mask.src_port =
5574 be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
5576 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
5577 rule->tuples_mask.dst_port =
5578 be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
5580 rule->tuples.ether_proto = ETH_P_IPV6;
5581 rule->tuples_mask.ether_proto = 0xFFFF;
5584 case IPV6_USER_FLOW:
5585 be32_to_cpu_array(rule->tuples.src_ip,
5586 fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5587 be32_to_cpu_array(rule->tuples_mask.src_ip,
5588 fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
5590 be32_to_cpu_array(rule->tuples.dst_ip,
5591 fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5592 be32_to_cpu_array(rule->tuples_mask.dst_ip,
5593 fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
5595 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
5596 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
5598 rule->tuples.ether_proto = ETH_P_IPV6;
5599 rule->tuples_mask.ether_proto = 0xFFFF;
5603 ether_addr_copy(rule->tuples.src_mac,
5604 fs->h_u.ether_spec.h_source);
5605 ether_addr_copy(rule->tuples_mask.src_mac,
5606 fs->m_u.ether_spec.h_source);
5608 ether_addr_copy(rule->tuples.dst_mac,
5609 fs->h_u.ether_spec.h_dest);
5610 ether_addr_copy(rule->tuples_mask.dst_mac,
5611 fs->m_u.ether_spec.h_dest);
5613 rule->tuples.ether_proto =
5614 be16_to_cpu(fs->h_u.ether_spec.h_proto);
5615 rule->tuples_mask.ether_proto =
5616 be16_to_cpu(fs->m_u.ether_spec.h_proto);
5623 switch (flow_type) {
5626 rule->tuples.ip_proto = IPPROTO_SCTP;
5627 rule->tuples_mask.ip_proto = 0xFF;
5631 rule->tuples.ip_proto = IPPROTO_TCP;
5632 rule->tuples_mask.ip_proto = 0xFF;
5636 rule->tuples.ip_proto = IPPROTO_UDP;
5637 rule->tuples_mask.ip_proto = 0xFF;
5643 if ((fs->flow_type & FLOW_EXT)) {
5644 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
5645 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
5648 if (fs->flow_type & FLOW_MAC_EXT) {
5649 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
5650 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
5656 /* the caller must hold fd_rule_lock */
5657 static int hclge_fd_config_rule(struct hclge_dev *hdev,
5658 struct hclge_fd_rule *rule)
5663 dev_err(&hdev->pdev->dev,
5664 "The flow director rule is NULL\n");
5668 /* it will never fail here, so there is no need to check the return value */
5669 hclge_fd_update_rule_list(hdev, rule, rule->location, true);
5671 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5675 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5682 hclge_fd_update_rule_list(hdev, rule, rule->location, false);
5686 static int hclge_add_fd_entry(struct hnae3_handle *handle,
5687 struct ethtool_rxnfc *cmd)
5689 struct hclge_vport *vport = hclge_get_vport(handle);
5690 struct hclge_dev *hdev = vport->back;
5691 u16 dst_vport_id = 0, q_index = 0;
5692 struct ethtool_rx_flow_spec *fs;
5693 struct hclge_fd_rule *rule;
5698 if (!hnae3_dev_fd_supported(hdev))
5702 dev_warn(&hdev->pdev->dev,
5703 "Please enable flow director first\n");
5707 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5709 ret = hclge_fd_check_spec(hdev, fs, &unused);
5711 dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
5715 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
5716 action = HCLGE_FD_ACTION_DROP_PACKET;
5718 u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
5719 u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
5722 if (vf > hdev->num_req_vfs) {
5723 dev_err(&hdev->pdev->dev,
5724 "Error: vf id (%u) > max vf num (%u)\n",
5725 vf, hdev->num_req_vfs);
5729 dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
5730 tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
5733 dev_err(&hdev->pdev->dev,
5734 "Error: queue id (%u) > max tqp num (%u)\n",
5739 action = HCLGE_FD_ACTION_ACCEPT_PACKET;
5743 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
5747 ret = hclge_fd_get_tuple(hdev, fs, rule);
5753 rule->flow_type = fs->flow_type;
5755 rule->location = fs->location;
5756 rule->unused_tuple = unused;
5757 rule->vf_id = dst_vport_id;
5758 rule->queue_id = q_index;
5759 rule->action = action;
5760 rule->rule_type = HCLGE_FD_EP_ACTIVE;
5762 /* to avoid rule conflict, when the user configures a rule via ethtool,
5763 * we need to clear all arfs rules
5765 hclge_clear_arfs_rules(handle);
5767 spin_lock_bh(&hdev->fd_rule_lock);
5768 ret = hclge_fd_config_rule(hdev, rule);
5770 spin_unlock_bh(&hdev->fd_rule_lock);
5775 static int hclge_del_fd_entry(struct hnae3_handle *handle,
5776 struct ethtool_rxnfc *cmd)
5778 struct hclge_vport *vport = hclge_get_vport(handle);
5779 struct hclge_dev *hdev = vport->back;
5780 struct ethtool_rx_flow_spec *fs;
5783 if (!hnae3_dev_fd_supported(hdev))
5786 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5788 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5791 if (!hclge_fd_rule_exist(hdev, fs->location)) {
5792 dev_err(&hdev->pdev->dev,
5793 "Delete fail, rule %u does not exist\n", fs->location);
5797 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
5802 spin_lock_bh(&hdev->fd_rule_lock);
5803 ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
5805 spin_unlock_bh(&hdev->fd_rule_lock);
5810 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
5813 struct hclge_vport *vport = hclge_get_vport(handle);
5814 struct hclge_dev *hdev = vport->back;
5815 struct hclge_fd_rule *rule;
5816 struct hlist_node *node;
5819 if (!hnae3_dev_fd_supported(hdev))
5822 spin_lock_bh(&hdev->fd_rule_lock);
5823 for_each_set_bit(location, hdev->fd_bmap,
5824 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
5825 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
5829 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
5831 hlist_del(&rule->rule_node);
5834 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5835 hdev->hclge_fd_rule_num = 0;
5836 bitmap_zero(hdev->fd_bmap,
5837 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
5840 spin_unlock_bh(&hdev->fd_rule_lock);
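/* Re-program every cached flow director rule after a reset. A rule whose
 * action or key cannot be written back is dropped from the list so that the
 * software state keeps matching what the hardware actually contains.
 */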
5843 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
5845 struct hclge_vport *vport = hclge_get_vport(handle);
5846 struct hclge_dev *hdev = vport->back;
5847 struct hclge_fd_rule *rule;
5848 struct hlist_node *node;
5851 /* Return ok here, because the reset error handling will check this
5852 * return value; if an error is returned here, the reset process will fail.
5855 if (!hnae3_dev_fd_supported(hdev))
5858 /* if fd is disabled, it should not be restored during reset */
5862 spin_lock_bh(&hdev->fd_rule_lock);
5863 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
5864 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
5866 ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
5869 dev_warn(&hdev->pdev->dev,
5870 "Restore rule %u failed, remove it\n",
5872 clear_bit(rule->location, hdev->fd_bmap);
5873 hlist_del(&rule->rule_node);
5875 hdev->hclge_fd_rule_num--;
5879 if (hdev->hclge_fd_rule_num)
5880 hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
5882 spin_unlock_bh(&hdev->fd_rule_lock);
5887 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
5888 struct ethtool_rxnfc *cmd)
5890 struct hclge_vport *vport = hclge_get_vport(handle);
5891 struct hclge_dev *hdev = vport->back;
5893 if (!hnae3_dev_fd_supported(hdev))
5896 cmd->rule_cnt = hdev->hclge_fd_rule_num;
5897 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
5902 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
5903 struct ethtool_rxnfc *cmd)
5905 struct hclge_vport *vport = hclge_get_vport(handle);
5906 struct hclge_fd_rule *rule = NULL;
5907 struct hclge_dev *hdev = vport->back;
5908 struct ethtool_rx_flow_spec *fs;
5909 struct hlist_node *node2;
5911 if (!hnae3_dev_fd_supported(hdev))
5914 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
5916 spin_lock_bh(&hdev->fd_rule_lock);
5918 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
5919 if (rule->location >= fs->location)
5923 if (!rule || fs->location != rule->location) {
5924 spin_unlock_bh(&hdev->fd_rule_lock);
5929 fs->flow_type = rule->flow_type;
5930 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
5934 fs->h_u.tcp_ip4_spec.ip4src =
5935 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5936 fs->m_u.tcp_ip4_spec.ip4src =
5937 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5938 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5940 fs->h_u.tcp_ip4_spec.ip4dst =
5941 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5942 fs->m_u.tcp_ip4_spec.ip4dst =
5943 rule->unused_tuple & BIT(INNER_DST_IP) ?
5944 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5946 fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
5947 fs->m_u.tcp_ip4_spec.psrc =
5948 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
5949 0 : cpu_to_be16(rule->tuples_mask.src_port);
5951 fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
5952 fs->m_u.tcp_ip4_spec.pdst =
5953 rule->unused_tuple & BIT(INNER_DST_PORT) ?
5954 0 : cpu_to_be16(rule->tuples_mask.dst_port);
5956 fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
5957 fs->m_u.tcp_ip4_spec.tos =
5958 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5959 0 : rule->tuples_mask.ip_tos;
5963 fs->h_u.usr_ip4_spec.ip4src =
5964 cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
5965 fs->m_u.usr_ip4_spec.ip4src =
5966 rule->unused_tuple & BIT(INNER_SRC_IP) ?
5967 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
5969 fs->h_u.usr_ip4_spec.ip4dst =
5970 cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
5971 fs->m_u.usr_ip4_spec.ip4dst =
5972 rule->unused_tuple & BIT(INNER_DST_IP) ?
5973 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
5975 fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
5976 fs->m_u.usr_ip4_spec.tos =
5977 rule->unused_tuple & BIT(INNER_IP_TOS) ?
5978 0 : rule->tuples_mask.ip_tos;
5980 fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
5981 fs->m_u.usr_ip4_spec.proto =
5982 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
5983 0 : rule->tuples_mask.ip_proto;
5985 fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
5991 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
5992 rule->tuples.src_ip, IPV6_SIZE);
5993 if (rule->unused_tuple & BIT(INNER_SRC_IP))
5994 memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
5995 sizeof(int) * IPV6_SIZE);
5997 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
5998 rule->tuples_mask.src_ip, IPV6_SIZE);
6000 cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
6001 rule->tuples.dst_ip, IPV6_SIZE);
6002 if (rule->unused_tuple & BIT(INNER_DST_IP))
6003 memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
6004 sizeof(int) * IPV6_SIZE);
6006 cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
6007 rule->tuples_mask.dst_ip, IPV6_SIZE);
6009 fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
6010 fs->m_u.tcp_ip6_spec.psrc =
6011 rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6012 0 : cpu_to_be16(rule->tuples_mask.src_port);
6014 fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
6015 fs->m_u.tcp_ip6_spec.pdst =
6016 rule->unused_tuple & BIT(INNER_DST_PORT) ?
6017 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6020 case IPV6_USER_FLOW:
6021 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
6022 rule->tuples.src_ip, IPV6_SIZE);
6023 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6024 memset(fs->m_u.usr_ip6_spec.ip6src, 0,
6025 sizeof(int) * IPV6_SIZE);
6027 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
6028 rule->tuples_mask.src_ip, IPV6_SIZE);
6030 cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
6031 rule->tuples.dst_ip, IPV6_SIZE);
6032 if (rule->unused_tuple & BIT(INNER_DST_IP))
6033 memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
6034 sizeof(int) * IPV6_SIZE);
6036 cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
6037 rule->tuples_mask.dst_ip, IPV6_SIZE);
6039 fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
6040 fs->m_u.usr_ip6_spec.l4_proto =
6041 rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6042 0 : rule->tuples_mask.ip_proto;
6046 ether_addr_copy(fs->h_u.ether_spec.h_source,
6047 rule->tuples.src_mac);
6048 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6049 eth_zero_addr(fs->m_u.ether_spec.h_source);
6051 ether_addr_copy(fs->m_u.ether_spec.h_source,
6052 rule->tuples_mask.src_mac);
6054 ether_addr_copy(fs->h_u.ether_spec.h_dest,
6055 rule->tuples.dst_mac);
6056 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6057 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6059 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6060 rule->tuples_mask.dst_mac);
6062 fs->h_u.ether_spec.h_proto =
6063 cpu_to_be16(rule->tuples.ether_proto);
6064 fs->m_u.ether_spec.h_proto =
6065 rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6066 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6070 spin_unlock_bh(&hdev->fd_rule_lock);
6074 if (fs->flow_type & FLOW_EXT) {
6075 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6076 fs->m_ext.vlan_tci =
6077 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6078 cpu_to_be16(VLAN_VID_MASK) :
6079 cpu_to_be16(rule->tuples_mask.vlan_tag1);
6082 if (fs->flow_type & FLOW_MAC_EXT) {
6083 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6084 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6085 eth_zero_addr(fs->m_ext.h_dest);
6087 ether_addr_copy(fs->m_ext.h_dest,
6088 rule->tuples_mask.dst_mac);
6091 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6092 fs->ring_cookie = RX_CLS_FLOW_DISC;
6096 fs->ring_cookie = rule->queue_id;
6097 vf_id = rule->vf_id;
6098 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6099 fs->ring_cookie |= vf_id;
6102 spin_unlock_bh(&hdev->fd_rule_lock);
6107 static int hclge_get_all_rules(struct hnae3_handle *handle,
6108 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6110 struct hclge_vport *vport = hclge_get_vport(handle);
6111 struct hclge_dev *hdev = vport->back;
6112 struct hclge_fd_rule *rule;
6113 struct hlist_node *node2;
6116 if (!hnae3_dev_fd_supported(hdev))
6119 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6121 spin_lock_bh(&hdev->fd_rule_lock);
6122 hlist_for_each_entry_safe(rule, node2,
6123 &hdev->fd_rule_list, rule_node) {
6124 if (cnt == cmd->rule_cnt) {
6125 spin_unlock_bh(&hdev->fd_rule_lock);
6129 rule_locs[cnt] = rule->location;
6133 spin_unlock_bh(&hdev->fd_rule_lock);
6135 cmd->rule_cnt = cnt;
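/* Extract the aRFS match tuples from a dissected flow: ether type, IP
 * protocol, destination port and the source/destination addresses (an IPv4
 * address is stored in the last word of the IPv6-sized array).
 */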
6140 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6141 struct hclge_fd_rule_tuples *tuples)
6143 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6144 tuples->ip_proto = fkeys->basic.ip_proto;
6145 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6147 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6148 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6149 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6151 memcpy(tuples->src_ip,
6152 fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
6153 sizeof(tuples->src_ip));
6154 memcpy(tuples->dst_ip,
6155 fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
6156 sizeof(tuples->dst_ip));
6160 /* traverse all rules, check whether an existing rule has the same tuples */
6161 static struct hclge_fd_rule *
6162 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6163 const struct hclge_fd_rule_tuples *tuples)
6165 struct hclge_fd_rule *rule = NULL;
6166 struct hlist_node *node;
6168 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6169 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6176 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6177 struct hclge_fd_rule *rule)
6179 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6180 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6181 BIT(INNER_SRC_PORT);
6184 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6185 if (tuples->ether_proto == ETH_P_IP) {
6186 if (tuples->ip_proto == IPPROTO_TCP)
6187 rule->flow_type = TCP_V4_FLOW;
6189 rule->flow_type = UDP_V4_FLOW;
6191 if (tuples->ip_proto == IPPROTO_TCP)
6192 rule->flow_type = TCP_V6_FLOW;
6194 rule->flow_type = UDP_V6_FLOW;
6196 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6197 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
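/* aRFS entry point: look up an existing rule with the same tuples. If none
 * is found, allocate a free location from fd_bmap and insert a new rule; if
 * one exists on a different queue, only its action is rewritten; if it
 * already points at @queue_id, nothing is changed. aRFS additions are
 * rejected while ethtool-configured (EP) rules are active.
 */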
6200 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6201 u16 flow_id, struct flow_keys *fkeys)
6203 struct hclge_vport *vport = hclge_get_vport(handle);
6204 struct hclge_fd_rule_tuples new_tuples;
6205 struct hclge_dev *hdev = vport->back;
6206 struct hclge_fd_rule *rule;
6211 if (!hnae3_dev_fd_supported(hdev))
6214 memset(&new_tuples, 0, sizeof(new_tuples));
6215 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6217 spin_lock_bh(&hdev->fd_rule_lock);
6219 /* when an fd rule added by the user already exists,
6220 * arfs should not work
6222 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
6223 spin_unlock_bh(&hdev->fd_rule_lock);
6228 /* check whether a flow director filter exists for this flow;
6229 * if not, create a new filter for it;
6230 * if a filter exists with a different queue id, modify the filter;
6231 * if a filter exists with the same queue id, do nothing
6233 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6235 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6236 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6237 spin_unlock_bh(&hdev->fd_rule_lock);
6242 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6244 spin_unlock_bh(&hdev->fd_rule_lock);
6249 set_bit(bit_id, hdev->fd_bmap);
6250 rule->location = bit_id;
6251 rule->flow_id = flow_id;
6252 rule->queue_id = queue_id;
6253 hclge_fd_build_arfs_rule(&new_tuples, rule);
6254 ret = hclge_fd_config_rule(hdev, rule);
6256 spin_unlock_bh(&hdev->fd_rule_lock);
6261 return rule->location;
6264 spin_unlock_bh(&hdev->fd_rule_lock);
6266 if (rule->queue_id == queue_id)
6267 return rule->location;
6269 tmp_queue_id = rule->queue_id;
6270 rule->queue_id = queue_id;
6271 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6273 rule->queue_id = tmp_queue_id;
6277 return rule->location;
6280 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6282 #ifdef CONFIG_RFS_ACCEL
6283 struct hnae3_handle *handle = &hdev->vport[0].nic;
6284 struct hclge_fd_rule *rule;
6285 struct hlist_node *node;
6286 HLIST_HEAD(del_list);
6288 spin_lock_bh(&hdev->fd_rule_lock);
6289 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6290 spin_unlock_bh(&hdev->fd_rule_lock);
6293 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6294 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6295 rule->flow_id, rule->location)) {
6296 hlist_del_init(&rule->rule_node);
6297 hlist_add_head(&rule->rule_node, &del_list);
6298 hdev->hclge_fd_rule_num--;
6299 clear_bit(rule->location, hdev->fd_bmap);
6302 spin_unlock_bh(&hdev->fd_rule_lock);
6304 hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
6305 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6306 rule->location, NULL, false);
6312 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
6314 #ifdef CONFIG_RFS_ACCEL
6315 struct hclge_vport *vport = hclge_get_vport(handle);
6316 struct hclge_dev *hdev = vport->back;
6318 if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
6319 hclge_del_all_fd_entries(handle, true);
6323 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
6325 struct hclge_vport *vport = hclge_get_vport(handle);
6326 struct hclge_dev *hdev = vport->back;
6328 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
6329 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
6332 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
6334 struct hclge_vport *vport = hclge_get_vport(handle);
6335 struct hclge_dev *hdev = vport->back;
6337 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
6340 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
6342 struct hclge_vport *vport = hclge_get_vport(handle);
6343 struct hclge_dev *hdev = vport->back;
6345 return hdev->rst_stats.hw_reset_done_cnt;
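/* Toggle the flow director feature: record the new state in fd_en, then
 * either remove the existing entries from the hardware (the rule list itself
 * is only cleared when aRFS rules were active) or restore the cached rules.
 */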
6348 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
6350 struct hclge_vport *vport = hclge_get_vport(handle);
6351 struct hclge_dev *hdev = vport->back;
6354 hdev->fd_en = enable;
6355 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
6357 hclge_del_all_fd_entries(handle, clear);
6359 hclge_restore_fd_entries(handle);
6362 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
6364 struct hclge_desc desc;
6365 struct hclge_config_mac_mode_cmd *req =
6366 (struct hclge_config_mac_mode_cmd *)desc.data;
6370 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
6373 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
6374 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
6375 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
6376 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
6377 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
6378 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
6379 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
6380 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
6381 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
6382 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
6385 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6387 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6389 dev_err(&hdev->pdev->dev,
6390 "mac enable fail, ret =%d.\n", ret);
6393 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
6394 u8 switch_param, u8 param_mask)
6396 struct hclge_mac_vlan_switch_cmd *req;
6397 struct hclge_desc desc;
6401 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
6402 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
6404 /* read current config parameter */
6405 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
6407 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
6408 req->func_id = cpu_to_le32(func_id);
6410 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6412 dev_err(&hdev->pdev->dev,
6413 "read mac vlan switch parameter fail, ret = %d\n", ret);
6417 /* modify and write new config parameter */
6418 hclge_cmd_reuse_desc(&desc, false);
6419 req->switch_param = (req->switch_param & param_mask) | switch_param;
6420 req->param_mask = param_mask;
6422 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6424 dev_err(&hdev->pdev->dev,
6425 "set mac vlan switch parameter fail, ret = %d\n", ret);
6429 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
6432 #define HCLGE_PHY_LINK_STATUS_NUM 200
6434 struct phy_device *phydev = hdev->hw.mac.phydev;
6439 ret = phy_read_status(phydev);
6441 dev_err(&hdev->pdev->dev,
6442 "phy update link status fail, ret = %d\n", ret);
6446 if (phydev->link == link_ret)
6449 msleep(HCLGE_LINK_STATUS_MS);
6450 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
6453 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
6455 #define HCLGE_MAC_LINK_STATUS_NUM 100
6461 ret = hclge_get_mac_link_status(hdev);
6464 else if (ret == link_ret)
6467 msleep(HCLGE_LINK_STATUS_MS);
6468 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
6472 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
6475 #define HCLGE_LINK_STATUS_DOWN 0
6476 #define HCLGE_LINK_STATUS_UP 1
6480 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
6483 hclge_phy_link_status_wait(hdev, link_ret);
6485 return hclge_mac_link_status_wait(hdev, link_ret);
6488 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
6490 struct hclge_config_mac_mode_cmd *req;
6491 struct hclge_desc desc;
6495 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
6496 /* 1 Read out the MAC mode config first */
6497 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
6498 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6500 dev_err(&hdev->pdev->dev,
6501 "mac loopback get fail, ret =%d.\n", ret);
6505 /* 2 Then setup the loopback flag */
6506 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
6507 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
6508 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0);
6509 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0);
6511 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
6513 /* 3 Config the mac work mode with the loopback flag
6514 * and its original configuration parameters
6516 hclge_cmd_reuse_desc(&desc, false);
6517 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6519 dev_err(&hdev->pdev->dev,
6520 "mac loopback set fail, ret =%d.\n", ret);
6524 static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
6525 enum hnae3_loop loop_mode)
6527 #define HCLGE_SERDES_RETRY_MS 10
6528 #define HCLGE_SERDES_RETRY_NUM 100
6530 struct hclge_serdes_lb_cmd *req;
6531 struct hclge_desc desc;
6535 req = (struct hclge_serdes_lb_cmd *)desc.data;
6536 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
6538 switch (loop_mode) {
6539 case HNAE3_LOOP_SERIAL_SERDES:
6540 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
6542 case HNAE3_LOOP_PARALLEL_SERDES:
6543 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
6546 dev_err(&hdev->pdev->dev,
6547 "unsupported serdes loopback mode %d\n", loop_mode);
6552 req->enable = loop_mode_b;
6553 req->mask = loop_mode_b;
6555 req->mask = loop_mode_b;
6558 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6560 dev_err(&hdev->pdev->dev,
6561 "serdes loopback set fail, ret = %d\n", ret);
6566 msleep(HCLGE_SERDES_RETRY_MS);
6567 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
6569 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6571 dev_err(&hdev->pdev->dev,
6572 "serdes loopback get fail, ret = %d\n", ret);
6575 } while (++i < HCLGE_SERDES_RETRY_NUM &&
6576 !(req->result & HCLGE_CMD_SERDES_DONE_B));
6578 if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
6579 dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
6581 } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
6582 dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
6588 static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
6589 enum hnae3_loop loop_mode)
6593 ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
6597 hclge_cfg_mac_mode(hdev, en);
6599 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
6601 dev_err(&hdev->pdev->dev,
6602 "serdes loopback config mac mode timeout\n");
6607 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
6608 struct phy_device *phydev)
6612 if (!phydev->suspended) {
6613 ret = phy_suspend(phydev);
6618 ret = phy_resume(phydev);
6622 return phy_loopback(phydev, true);
6625 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
6626 struct phy_device *phydev)
6630 ret = phy_loopback(phydev, false);
6634 return phy_suspend(phydev);
6637 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
6639 struct phy_device *phydev = hdev->hw.mac.phydev;
6646 ret = hclge_enable_phy_loopback(hdev, phydev);
6648 ret = hclge_disable_phy_loopback(hdev, phydev);
6650 dev_err(&hdev->pdev->dev,
6651 "set phy loopback fail, ret = %d\n", ret);
6655 hclge_cfg_mac_mode(hdev, en);
6657 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
6659 dev_err(&hdev->pdev->dev,
6660 "phy loopback config mac mode timeout\n");
6665 static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
6666 int stream_id, bool enable)
6668 struct hclge_desc desc;
6669 struct hclge_cfg_com_tqp_queue_cmd *req =
6670 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
6673 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
6674 req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
6675 req->stream_id = cpu_to_le16(stream_id);
6677 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
6679 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
6681 dev_err(&hdev->pdev->dev,
6682 "Tqp enable fail, status =%d.\n", ret);
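/* Enable or disable the requested loopback mode. On revision 0x21 and later
 * the SSU "allow loopback" switch parameter is cleared while loopback is
 * enabled (see the comment in the function body) so that self-addressed test
 * frames actually reach the MAC, then the APP/serdes/PHY specific handler
 * runs, and finally every TQP of the port is enabled or disabled to match.
 */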
6686 static int hclge_set_loopback(struct hnae3_handle *handle,
6687 enum hnae3_loop loop_mode, bool en)
6689 struct hclge_vport *vport = hclge_get_vport(handle);
6690 struct hnae3_knic_private_info *kinfo;
6691 struct hclge_dev *hdev = vport->back;
6694 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
6695 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
6696 * the same, the packets are looped back in the SSU. If SSU loopback
6697 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
6699 if (hdev->pdev->revision >= 0x21) {
6700 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
6702 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
6703 HCLGE_SWITCH_ALW_LPBK_MASK);
6708 switch (loop_mode) {
6709 case HNAE3_LOOP_APP:
6710 ret = hclge_set_app_loopback(hdev, en);
6712 case HNAE3_LOOP_SERIAL_SERDES:
6713 case HNAE3_LOOP_PARALLEL_SERDES:
6714 ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
6716 case HNAE3_LOOP_PHY:
6717 ret = hclge_set_phy_loopback(hdev, en);
6721 dev_err(&hdev->pdev->dev,
6722 "loop_mode %d is not supported\n", loop_mode);
6729 kinfo = &vport->nic.kinfo;
6730 for (i = 0; i < kinfo->num_tqps; i++) {
6731 ret = hclge_tqp_enable(hdev, i, 0, en);
6739 static int hclge_set_default_loopback(struct hclge_dev *hdev)
6743 ret = hclge_set_app_loopback(hdev, false);
6747 ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
6751 return hclge_cfg_serdes_loopback(hdev, false,
6752 HNAE3_LOOP_PARALLEL_SERDES);
6755 static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
6757 struct hclge_vport *vport = hclge_get_vport(handle);
6758 struct hnae3_knic_private_info *kinfo;
6759 struct hnae3_queue *queue;
6760 struct hclge_tqp *tqp;
6763 kinfo = &vport->nic.kinfo;
6764 for (i = 0; i < kinfo->num_tqps; i++) {
6765 queue = handle->kinfo.tqp[i];
6766 tqp = container_of(queue, struct hclge_tqp, q);
6767 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
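/* Wait (bounded by HCLGE_FLUSH_LINK_TIMEOUT) for an in-flight link update
 * in the service task to finish, so that a freshly set DOWN flag takes
 * effect before the caller returns.
 */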
6771 static void hclge_flush_link_update(struct hclge_dev *hdev)
6773 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
6775 unsigned long last = hdev->serv_processed_cnt;
6778 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
6779 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
6780 last == hdev->serv_processed_cnt)
6784 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
6786 struct hclge_vport *vport = hclge_get_vport(handle);
6787 struct hclge_dev *hdev = vport->back;
6790 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
6792 /* Set the DOWN flag here to disable link updating */
6793 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6795 /* flush memory to make sure DOWN is seen by service task */
6796 smp_mb__before_atomic();
6797 hclge_flush_link_update(hdev);
6801 static int hclge_ae_start(struct hnae3_handle *handle)
6803 struct hclge_vport *vport = hclge_get_vport(handle);
6804 struct hclge_dev *hdev = vport->back;
6807 hclge_cfg_mac_mode(hdev, true);
6808 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
6809 hdev->hw.mac.link = 0;
6811 /* reset tqp stats */
6812 hclge_reset_tqp_stats(handle);
6814 hclge_mac_start_phy(hdev);
6819 static void hclge_ae_stop(struct hnae3_handle *handle)
6821 struct hclge_vport *vport = hclge_get_vport(handle);
6822 struct hclge_dev *hdev = vport->back;
6825 set_bit(HCLGE_STATE_DOWN, &hdev->state);
6827 hclge_clear_arfs_rules(handle);
6829 /* If it is not a PF reset, the firmware will disable the MAC,
6830 * so it only needs to stop the phy here.
6832 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
6833 hdev->reset_type != HNAE3_FUNC_RESET) {
6834 hclge_mac_stop_phy(hdev);
6835 hclge_update_link_status(hdev);
6839 for (i = 0; i < handle->kinfo.num_tqps; i++)
6840 hclge_reset_tqp(handle, i);
6842 hclge_config_mac_tnl_int(hdev, false);
6845 hclge_cfg_mac_mode(hdev, false);
6847 hclge_mac_stop_phy(hdev);
6849 /* reset tqp stats */
6850 hclge_reset_tqp_stats(handle);
6851 hclge_update_link_status(hdev);
6854 int hclge_vport_start(struct hclge_vport *vport)
6856 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6857 vport->last_active_jiffies = jiffies;
6861 void hclge_vport_stop(struct hclge_vport *vport)
6863 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
6866 static int hclge_client_start(struct hnae3_handle *handle)
6868 struct hclge_vport *vport = hclge_get_vport(handle);
6870 return hclge_vport_start(vport);
6873 static void hclge_client_stop(struct hnae3_handle *handle)
6875 struct hclge_vport *vport = hclge_get_vport(handle);
6877 hclge_vport_stop(vport);
6880 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
6881 u16 cmdq_resp, u8 resp_code,
6882 enum hclge_mac_vlan_tbl_opcode op)
6884 struct hclge_dev *hdev = vport->back;
6887 dev_err(&hdev->pdev->dev,
6888 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
6893 if (op == HCLGE_MAC_VLAN_ADD) {
6894 if ((!resp_code) || (resp_code == 1)) {
6896 } else if (resp_code == HCLGE_ADD_UC_OVERFLOW) {
6897 dev_err(&hdev->pdev->dev,
6898 "add mac addr failed for uc_overflow.\n");
6900 } else if (resp_code == HCLGE_ADD_MC_OVERFLOW) {
6901 dev_err(&hdev->pdev->dev,
6902 "add mac addr failed for mc_overflow.\n");
6906 dev_err(&hdev->pdev->dev,
6907 "add mac addr failed for undefined, code=%u.\n",
6910 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
6913 } else if (resp_code == 1) {
6914 dev_dbg(&hdev->pdev->dev,
6915 "remove mac addr failed for miss.\n");
6919 dev_err(&hdev->pdev->dev,
6920 "remove mac addr failed for undefined, code=%u.\n",
6923 } else if (op == HCLGE_MAC_VLAN_LKUP) {
6926 } else if (resp_code == 1) {
6927 dev_dbg(&hdev->pdev->dev,
6928 "lookup mac addr failed for miss.\n");
6932 dev_err(&hdev->pdev->dev,
6933 "lookup mac addr failed for undefined, code=%u.\n",
6938 dev_err(&hdev->pdev->dev,
6939 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
6944 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
6946 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
6948 unsigned int word_num;
6949 unsigned int bit_num;
6951 if (vfid > 255 || vfid < 0)
6954 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
6955 word_num = vfid / 32;
6956 bit_num = vfid % 32;
6958 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6960 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
6962 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
6963 bit_num = vfid % 32;
6965 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
6967 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
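/* Return true if no function id bit is set in desc[1] and desc[2],
 * i.e. the multicast entry is no longer referenced by any vport.
 */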
6973 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
6975 #define HCLGE_DESC_NUMBER 3
6976 #define HCLGE_FUNC_NUMBER_PER_DESC 6
6979 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
6980 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
6981 if (desc[i].data[j])
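/* Build a MAC-VLAN table entry for @addr: mark the entry valid, flag it
 * as multicast when @is_mc, and pack the 6-byte address into the
 * hi32/lo16 fields.
 */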
6987 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
6988 const u8 *addr, bool is_mc)
6990 const unsigned char *mac_addr = addr;
6991 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
6992 (mac_addr[0]) | (mac_addr[1] << 8);
6993 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
6995 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
6997 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
6998 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
7001 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
7002 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
7005 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
7006 struct hclge_mac_vlan_tbl_entry_cmd *req)
7008 struct hclge_dev *hdev = vport->back;
7009 struct hclge_desc desc;
7014 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
7016 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7018 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7020 dev_err(&hdev->pdev->dev,
7021 "del mac addr failed for cmd_send, ret =%d.\n",
7025 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7026 retval = le16_to_cpu(desc.retval);
7028 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7029 HCLGE_MAC_VLAN_REMOVE);
7032 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
7033 struct hclge_mac_vlan_tbl_entry_cmd *req,
7034 struct hclge_desc *desc,
7037 struct hclge_dev *hdev = vport->back;
7042 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
7044 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7045 memcpy(desc[0].data,
7047 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7048 hclge_cmd_setup_basic_desc(&desc[1],
7049 HCLGE_OPC_MAC_VLAN_ADD,
7051 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7052 hclge_cmd_setup_basic_desc(&desc[2],
7053 HCLGE_OPC_MAC_VLAN_ADD,
7055 ret = hclge_cmd_send(&hdev->hw, desc, 3);
7057 memcpy(desc[0].data,
7059 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7060 ret = hclge_cmd_send(&hdev->hw, desc, 1);
7063 dev_err(&hdev->pdev->dev,
7064 "lookup mac addr failed for cmd_send, ret =%d.\n",
7068 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
7069 retval = le16_to_cpu(desc[0].retval);
7071 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
7072 HCLGE_MAC_VLAN_LKUP);
7075 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
7076 struct hclge_mac_vlan_tbl_entry_cmd *req,
7077 struct hclge_desc *mc_desc)
7079 struct hclge_dev *hdev = vport->back;
7086 struct hclge_desc desc;
7088 hclge_cmd_setup_basic_desc(&desc,
7089 HCLGE_OPC_MAC_VLAN_ADD,
7091 memcpy(desc.data, req,
7092 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7093 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7094 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7095 retval = le16_to_cpu(desc.retval);
7097 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7099 HCLGE_MAC_VLAN_ADD);
7101 hclge_cmd_reuse_desc(&mc_desc[0], false);
7102 mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7103 hclge_cmd_reuse_desc(&mc_desc[1], false);
7104 mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7105 hclge_cmd_reuse_desc(&mc_desc[2], false);
7106 mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
7107 memcpy(mc_desc[0].data, req,
7108 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
7109 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
7110 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
7111 retval = le16_to_cpu(mc_desc[0].retval);
7113 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
7115 HCLGE_MAC_VLAN_ADD);
7119 dev_err(&hdev->pdev->dev,
7120 "add mac addr failed for cmd_send, ret =%d.\n",
7128 static int hclge_init_umv_space(struct hclge_dev *hdev)
7130 u16 allocated_size = 0;
7133 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size,
7138 if (allocated_size < hdev->wanted_umv_size)
7139 dev_warn(&hdev->pdev->dev,
7140 "Alloc umv space failed, want %u, get %u\n",
7141 hdev->wanted_umv_size, allocated_size);
7143 mutex_init(&hdev->umv_mutex);
7144 hdev->max_umv_size = allocated_size;
7145 /* divide max_umv_size by (hdev->num_req_vfs + 2), in order to
7146 * preserve some unicast mac vlan table entries shared by pf
7149 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_req_vfs + 2);
7150 hdev->share_umv_size = hdev->priv_umv_size +
7151 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7156 static int hclge_uninit_umv_space(struct hclge_dev *hdev)
7160 if (hdev->max_umv_size > 0) {
7161 ret = hclge_set_umv_space(hdev, hdev->max_umv_size, NULL,
7165 hdev->max_umv_size = 0;
7167 mutex_destroy(&hdev->umv_mutex);
7172 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
7173 u16 *allocated_size, bool is_alloc)
7175 struct hclge_umv_spc_alc_cmd *req;
7176 struct hclge_desc desc;
7179 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
7180 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
7182 hnae3_set_bit(req->allocate, HCLGE_UMV_SPC_ALC_B, 1);
7184 req->space_size = cpu_to_le32(space_size);
7186 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7188 dev_err(&hdev->pdev->dev,
7189 "%s umv space failed for cmd_send, ret =%d\n",
7190 is_alloc ? "allocate" : "free", ret);
7194 if (is_alloc && allocated_size)
7195 *allocated_size = le32_to_cpu(desc.data[1]);
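/* Reset the unicast MAC (UMV) space accounting: clear each vport's
 * usage counter and restore the shared space to its initial value.
 */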
7200 static void hclge_reset_umv_space(struct hclge_dev *hdev)
7202 struct hclge_vport *vport;
7205 for (i = 0; i < hdev->num_alloc_vport; i++) {
7206 vport = &hdev->vport[i];
7207 vport->used_umv_num = 0;
7210 mutex_lock(&hdev->umv_mutex);
7211 hdev->share_umv_size = hdev->priv_umv_size +
7212 hdev->max_umv_size % (hdev->num_req_vfs + 2);
7213 mutex_unlock(&hdev->umv_mutex);
7216 static bool hclge_is_umv_space_full(struct hclge_vport *vport)
7218 struct hclge_dev *hdev = vport->back;
7221 mutex_lock(&hdev->umv_mutex);
7222 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
7223 hdev->share_umv_size == 0);
7224 mutex_unlock(&hdev->umv_mutex);
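/* Account for a unicast MAC entry being added or freed: a vport consumes
 * its private UMV quota first and falls back to the shared pool once
 * that quota is used up.
 */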
7229 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
7231 struct hclge_dev *hdev = vport->back;
7233 mutex_lock(&hdev->umv_mutex);
7235 if (vport->used_umv_num > hdev->priv_umv_size)
7236 hdev->share_umv_size++;
7238 if (vport->used_umv_num > 0)
7239 vport->used_umv_num--;
7241 if (vport->used_umv_num >= hdev->priv_umv_size &&
7242 hdev->share_umv_size > 0)
7243 hdev->share_umv_size--;
7244 vport->used_umv_num++;
7246 mutex_unlock(&hdev->umv_mutex);
7249 static int hclge_add_uc_addr(struct hnae3_handle *handle,
7250 const unsigned char *addr)
7252 struct hclge_vport *vport = hclge_get_vport(handle);
7254 return hclge_add_uc_addr_common(vport, addr);
7257 int hclge_add_uc_addr_common(struct hclge_vport *vport,
7258 const unsigned char *addr)
7260 struct hclge_dev *hdev = vport->back;
7261 struct hclge_mac_vlan_tbl_entry_cmd req;
7262 struct hclge_desc desc;
7263 u16 egress_port = 0;
7266 /* mac addr check */
7267 if (is_zero_ether_addr(addr) ||
7268 is_broadcast_ether_addr(addr) ||
7269 is_multicast_ether_addr(addr)) {
7270 dev_err(&hdev->pdev->dev,
7271 "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
7272 addr, is_zero_ether_addr(addr),
7273 is_broadcast_ether_addr(addr),
7274 is_multicast_ether_addr(addr));
7278 memset(&req, 0, sizeof(req));
7280 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7281 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7283 req.egress_port = cpu_to_le16(egress_port);
7285 hclge_prepare_mac_addr(&req, addr, false);
7287 /* Lookup the mac address in the mac_vlan table, and add
7288 * it if the entry does not exist. Repeated unicast entries
7289 * are not allowed in the mac vlan table.
7291 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
7292 if (ret == -ENOENT) {
7293 if (!hclge_is_umv_space_full(vport)) {
7294 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
7296 hclge_update_umv_space(vport, false);
7300 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
7301 hdev->priv_umv_size);
7306 /* check if we just hit the duplicate */
7308 dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
7309 vport->vport_id, addr);
7313 dev_err(&hdev->pdev->dev,
7314 "PF failed to add unicast entry(%pM) in the MAC table\n",
7320 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
7321 const unsigned char *addr)
7323 struct hclge_vport *vport = hclge_get_vport(handle);
7325 return hclge_rm_uc_addr_common(vport, addr);
7328 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
7329 const unsigned char *addr)
7331 struct hclge_dev *hdev = vport->back;
7332 struct hclge_mac_vlan_tbl_entry_cmd req;
7335 /* mac addr check */
7336 if (is_zero_ether_addr(addr) ||
7337 is_broadcast_ether_addr(addr) ||
7338 is_multicast_ether_addr(addr)) {
7339 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
7344 memset(&req, 0, sizeof(req));
7345 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7346 hclge_prepare_mac_addr(&req, addr, false);
7347 ret = hclge_remove_mac_vlan_tbl(vport, &req);
7349 hclge_update_umv_space(vport, true);
7354 static int hclge_add_mc_addr(struct hnae3_handle *handle,
7355 const unsigned char *addr)
7357 struct hclge_vport *vport = hclge_get_vport(handle);
7359 return hclge_add_mc_addr_common(vport, addr);
7362 int hclge_add_mc_addr_common(struct hclge_vport *vport,
7363 const unsigned char *addr)
7365 struct hclge_dev *hdev = vport->back;
7366 struct hclge_mac_vlan_tbl_entry_cmd req;
7367 struct hclge_desc desc[3];
7370 /* mac addr check */
7371 if (!is_multicast_ether_addr(addr)) {
7372 dev_err(&hdev->pdev->dev,
7373 "Add mc mac err! invalid mac:%pM.\n",
7377 memset(&req, 0, sizeof(req));
7378 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7379 hclge_prepare_mac_addr(&req, addr, true);
7380 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7382 /* This mac addr does not exist, add a new entry for it */
7383 memset(desc[0].data, 0, sizeof(desc[0].data));
7384 memset(desc[1].data, 0, sizeof(desc[0].data));
7385 memset(desc[2].data, 0, sizeof(desc[0].data));
7387 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
7390 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7392 if (status == -ENOSPC)
7393 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
7398 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
7399 const unsigned char *addr)
7401 struct hclge_vport *vport = hclge_get_vport(handle);
7403 return hclge_rm_mc_addr_common(vport, addr);
7406 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
7407 const unsigned char *addr)
7409 struct hclge_dev *hdev = vport->back;
7410 struct hclge_mac_vlan_tbl_entry_cmd req;
7411 enum hclge_cmd_status status;
7412 struct hclge_desc desc[3];
7414 /* mac addr check */
7415 if (!is_multicast_ether_addr(addr)) {
7416 dev_dbg(&hdev->pdev->dev,
7417 "Remove mc mac err! invalid mac:%pM.\n",
7422 memset(&req, 0, sizeof(req));
7423 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
7424 hclge_prepare_mac_addr(&req, addr, true);
7425 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
7427 /* This mac addr exists, remove this handle's VFID for it */
7428 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
7432 if (hclge_is_all_function_id_zero(desc))
7433 /* All the vfids are zero, so we need to delete this entry */
7434 status = hclge_remove_mac_vlan_tbl(vport, &req);
7436 /* Not all the vfids are zero, so just update the vfid bitmap */
7437 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
7440 /* Maybe this mac address is in the mta table, but it cannot be
7441 * deleted here because an mta entry represents an address
7442 * range rather than a specific address. The delete action on
7443 * all entries will take effect in update_mta_status called by
7444 * hns3_nic_set_rx_mode.
7452 void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7453 enum HCLGE_MAC_ADDR_TYPE mac_type)
7455 struct hclge_vport_mac_addr_cfg *mac_cfg;
7456 struct list_head *list;
7458 if (!vport->vport_id)
7461 mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL);
7465 mac_cfg->hd_tbl_status = true;
7466 memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN);
7468 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7469 &vport->uc_mac_list : &vport->mc_mac_list;
7471 list_add_tail(&mac_cfg->node, list);
7474 void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
7476 enum HCLGE_MAC_ADDR_TYPE mac_type)
7478 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7479 struct list_head *list;
7480 bool uc_flag, mc_flag;
7482 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7483 &vport->uc_mac_list : &vport->mc_mac_list;
7485 uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC;
7486 mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC;
7488 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7489 if (ether_addr_equal(mac_cfg->mac_addr, mac_addr)) {
7490 if (uc_flag && mac_cfg->hd_tbl_status)
7491 hclge_rm_uc_addr_common(vport, mac_addr);
7493 if (mc_flag && mac_cfg->hd_tbl_status)
7494 hclge_rm_mc_addr_common(vport, mac_addr);
7496 list_del(&mac_cfg->node);
7503 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
7504 enum HCLGE_MAC_ADDR_TYPE mac_type)
7506 struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp;
7507 struct list_head *list;
7509 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
7510 &vport->uc_mac_list : &vport->mc_mac_list;
7512 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
7513 if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status)
7514 hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr);
7516 if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status)
7517 hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr);
7519 mac_cfg->hd_tbl_status = false;
7521 list_del(&mac_cfg->node);
7527 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev)
7529 struct hclge_vport_mac_addr_cfg *mac, *tmp;
7530 struct hclge_vport *vport;
7533 mutex_lock(&hdev->vport_cfg_mutex);
7534 for (i = 0; i < hdev->num_alloc_vport; i++) {
7535 vport = &hdev->vport[i];
7536 list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) {
7537 list_del(&mac->node);
7541 list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) {
7542 list_del(&mac->node);
7546 mutex_unlock(&hdev->vport_cfg_mutex);
7549 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
7550 u16 cmdq_resp, u8 resp_code)
7552 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
7553 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
7554 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
7555 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
7560 dev_err(&hdev->pdev->dev,
7561 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
7566 switch (resp_code) {
7567 case HCLGE_ETHERTYPE_SUCCESS_ADD:
7568 case HCLGE_ETHERTYPE_ALREADY_ADD:
7571 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
7572 dev_err(&hdev->pdev->dev,
7573 "add mac ethertype failed for manager table overflow.\n");
7574 return_status = -EIO;
7576 case HCLGE_ETHERTYPE_KEY_CONFLICT:
7577 dev_err(&hdev->pdev->dev,
7578 "add mac ethertype failed for key conflict.\n");
7579 return_status = -EIO;
7582 dev_err(&hdev->pdev->dev,
7583 "add mac ethertype failed for undefined, code=%u.\n",
7585 return_status = -EIO;
7588 return return_status;
7591 static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
7594 struct hclge_mac_vlan_tbl_entry_cmd req;
7595 struct hclge_dev *hdev = vport->back;
7596 struct hclge_desc desc;
7597 u16 egress_port = 0;
7600 if (is_zero_ether_addr(mac_addr))
7603 memset(&req, 0, sizeof(req));
7604 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
7605 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
7606 req.egress_port = cpu_to_le16(egress_port);
7607 hclge_prepare_mac_addr(&req, mac_addr, false);
7609 if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
7612 vf_idx += HCLGE_VF_VPORT_START_NUM;
7613 for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
7615 ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
7621 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
7624 struct hclge_vport *vport = hclge_get_vport(handle);
7625 struct hclge_dev *hdev = vport->back;
7627 vport = hclge_get_vf_vport(hdev, vf);
7631 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
7632 dev_info(&hdev->pdev->dev,
7633 "Specified MAC(=%pM) is same as before, no change committed!\n",
7638 if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
7639 dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
7644 ether_addr_copy(vport->vf_info.mac, mac_addr);
7645 dev_info(&hdev->pdev->dev,
7646 "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
7649 return hclge_inform_reset_assert_to_vf(vport);
7652 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
7653 const struct hclge_mac_mgr_tbl_entry_cmd *req)
7655 struct hclge_desc desc;
7660 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
7661 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
7663 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7665 dev_err(&hdev->pdev->dev,
7666 "add mac ethertype failed for cmd_send, ret =%d.\n",
7671 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
7672 retval = le16_to_cpu(desc.retval);
7674 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
7677 static int init_mgr_tbl(struct hclge_dev *hdev)
7682 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
7683 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
7685 dev_err(&hdev->pdev->dev,
7686 "add mac ethertype failed, ret =%d.\n",
7695 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
7697 struct hclge_vport *vport = hclge_get_vport(handle);
7698 struct hclge_dev *hdev = vport->back;
7700 ether_addr_copy(p, hdev->hw.mac.mac_addr);
7703 static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
7706 const unsigned char *new_addr = (const unsigned char *)p;
7707 struct hclge_vport *vport = hclge_get_vport(handle);
7708 struct hclge_dev *hdev = vport->back;
7711 /* mac addr check */
7712 if (is_zero_ether_addr(new_addr) ||
7713 is_broadcast_ether_addr(new_addr) ||
7714 is_multicast_ether_addr(new_addr)) {
7715 dev_err(&hdev->pdev->dev,
7716 "Change uc mac err! invalid mac:%pM.\n",
7721 if ((!is_first || is_kdump_kernel()) &&
7722 hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
7723 dev_warn(&hdev->pdev->dev,
7724 "remove old uc mac address fail.\n");
7726 ret = hclge_add_uc_addr(handle, new_addr);
7728 dev_err(&hdev->pdev->dev,
7729 "add uc mac address fail, ret =%d.\n",
7733 hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr))
7734 dev_err(&hdev->pdev->dev,
7735 "restore uc mac address fail.\n");
7740 ret = hclge_pause_addr_cfg(hdev, new_addr);
7742 dev_err(&hdev->pdev->dev,
7743 "configure mac pause address fail, ret =%d.\n",
7748 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
7753 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
7756 struct hclge_vport *vport = hclge_get_vport(handle);
7757 struct hclge_dev *hdev = vport->back;
7759 if (!hdev->hw.mac.phydev)
7762 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
7765 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
7766 u8 fe_type, bool filter_en, u8 vf_id)
7768 struct hclge_vlan_filter_ctrl_cmd *req;
7769 struct hclge_desc desc;
7772 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
7774 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
7775 req->vlan_type = vlan_type;
7776 req->vlan_fe = filter_en ? fe_type : 0;
7779 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7781 dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
7787 #define HCLGE_FILTER_TYPE_VF 0
7788 #define HCLGE_FILTER_TYPE_PORT 1
7789 #define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
7790 #define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
7791 #define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
7792 #define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
7793 #define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
7794 #define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
7795 | HCLGE_FILTER_FE_ROCE_EGRESS_B)
7796 #define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
7797 | HCLGE_FILTER_FE_ROCE_INGRESS_B)
7799 static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
7801 struct hclge_vport *vport = hclge_get_vport(handle);
7802 struct hclge_dev *hdev = vport->back;
7804 if (hdev->pdev->revision >= 0x21) {
7805 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7806 HCLGE_FILTER_FE_EGRESS, enable, 0);
7807 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
7808 HCLGE_FILTER_FE_INGRESS, enable, 0);
7810 hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
7811 HCLGE_FILTER_FE_EGRESS_V1_B, enable,
7815 handle->netdev_flags |= HNAE3_VLAN_FLTR;
7817 handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
7820 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
7821 bool is_kill, u16 vlan,
7824 struct hclge_vport *vport = &hdev->vport[vfid];
7825 struct hclge_vlan_filter_vf_cfg_cmd *req0;
7826 struct hclge_vlan_filter_vf_cfg_cmd *req1;
7827 struct hclge_desc desc[2];
7832 /* If the vf vlan table is full, the firmware will close the vf vlan filter,
7833 * so it is neither possible nor necessary to add a new vlan id to it.
7834 * If spoof check is enabled and the vf vlan table is full, new vlans
7835 * shouldn't be added, because tx packets with these vlan ids will be dropped.
7837 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
7838 if (vport->vf_info.spoofchk && vlan) {
7839 dev_err(&hdev->pdev->dev,
7840 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
7846 hclge_cmd_setup_basic_desc(&desc[0],
7847 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7848 hclge_cmd_setup_basic_desc(&desc[1],
7849 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
7851 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
7853 vf_byte_off = vfid / 8;
7854 vf_byte_val = 1 << (vfid % 8);
7856 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
7857 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
7859 req0->vlan_id = cpu_to_le16(vlan);
7860 req0->vlan_cfg = is_kill;
7862 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
7863 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
7865 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
7867 ret = hclge_cmd_send(&hdev->hw, desc, 2);
7869 dev_err(&hdev->pdev->dev,
7870 "Send vf vlan command fail, ret =%d.\n",
7876 #define HCLGE_VF_VLAN_NO_ENTRY 2
7877 if (!req0->resp_code || req0->resp_code == 1)
7880 if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
7881 set_bit(vfid, hdev->vf_vlan_full);
7882 dev_warn(&hdev->pdev->dev,
7883 "vf vlan table is full, vf vlan filter is disabled\n");
7887 dev_err(&hdev->pdev->dev,
7888 "Add vf vlan filter fail, ret =%u.\n",
7891 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
7892 if (!req0->resp_code)
7895 /* The vf vlan filter is disabled when the vf vlan table is full,
7896 * so new vlan ids are not added to the vf vlan table.
7897 * Just return 0 without a warning, to avoid massive verbose
7898 * print logs at unload time.
7900 if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
7903 dev_err(&hdev->pdev->dev,
7904 "Kill vf vlan filter fail, ret =%u.\n",
7911 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
7912 u16 vlan_id, bool is_kill)
7914 struct hclge_vlan_filter_pf_cfg_cmd *req;
7915 struct hclge_desc desc;
7916 u8 vlan_offset_byte_val;
7917 u8 vlan_offset_byte;
7921 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
7923 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
7924 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
7925 HCLGE_VLAN_BYTE_SIZE;
7926 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
7928 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
7929 req->vlan_offset = vlan_offset_160;
7930 req->vlan_cfg = is_kill;
7931 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
7933 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7935 dev_err(&hdev->pdev->dev,
7936 "port vlan command, send fail, ret =%d.\n", ret);
7940 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
7941 u16 vport_id, u16 vlan_id,
7944 u16 vport_idx, vport_num = 0;
7947 if (is_kill && !vlan_id)
7950 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
7953 dev_err(&hdev->pdev->dev,
7954 "Set %u vport vlan filter config fail, ret =%d.\n",
7959 /* vlan 0 may be added twice when 8021q module is enabled */
7960 if (!is_kill && !vlan_id &&
7961 test_bit(vport_id, hdev->vlan_table[vlan_id]))
7964 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
7965 dev_err(&hdev->pdev->dev,
7966 "Add port vlan failed, vport %u is already in vlan %u\n",
7972 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
7973 dev_err(&hdev->pdev->dev,
7974 "Delete port vlan failed, vport %u is not in vlan %u\n",
7979 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
7982 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
7983 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
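/* Write the vport's TX VLAN tag offload settings (accept/insert/default
 * tag1 and tag2) to hardware; the target vport is selected through
 * vf_offset and the vf bitmap.
 */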
7989 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
7991 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
7992 struct hclge_vport_vtag_tx_cfg_cmd *req;
7993 struct hclge_dev *hdev = vport->back;
7994 struct hclge_desc desc;
7998 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
8000 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
8001 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
8002 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
8003 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
8004 vcfg->accept_tag1 ? 1 : 0);
8005 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
8006 vcfg->accept_untag1 ? 1 : 0);
8007 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
8008 vcfg->accept_tag2 ? 1 : 0);
8009 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
8010 vcfg->accept_untag2 ? 1 : 0);
8011 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
8012 vcfg->insert_tag1_en ? 1 : 0);
8013 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
8014 vcfg->insert_tag2_en ? 1 : 0);
8015 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
8017 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8018 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8019 HCLGE_VF_NUM_PER_BYTE;
8020 req->vf_bitmap[bmap_index] =
8021 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8023 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8025 dev_err(&hdev->pdev->dev,
8026 "Send port txvlan cfg command fail, ret =%d\n",
8032 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
8034 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
8035 struct hclge_vport_vtag_rx_cfg_cmd *req;
8036 struct hclge_dev *hdev = vport->back;
8037 struct hclge_desc desc;
8041 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
8043 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
8044 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
8045 vcfg->strip_tag1_en ? 1 : 0);
8046 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
8047 vcfg->strip_tag2_en ? 1 : 0);
8048 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
8049 vcfg->vlan1_vlan_prionly ? 1 : 0);
8050 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
8051 vcfg->vlan2_vlan_prionly ? 1 : 0);
8053 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
8054 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
8055 HCLGE_VF_NUM_PER_BYTE;
8056 req->vf_bitmap[bmap_index] =
8057 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
8059 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8061 dev_err(&hdev->pdev->dev,
8062 "Send port rxvlan cfg command fail, ret =%d\n",
8068 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
8069 u16 port_base_vlan_state,
8074 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8075 vport->txvlan_cfg.accept_tag1 = true;
8076 vport->txvlan_cfg.insert_tag1_en = false;
8077 vport->txvlan_cfg.default_tag1 = 0;
8079 vport->txvlan_cfg.accept_tag1 = false;
8080 vport->txvlan_cfg.insert_tag1_en = true;
8081 vport->txvlan_cfg.default_tag1 = vlan_tag;
8084 vport->txvlan_cfg.accept_untag1 = true;
8086 /* accept_tag2 and accept_untag2 are not supported on
8087 * pdev revision(0x20); newer revisions support them, but
8088 * these two fields cannot be configured by the user.
8090 vport->txvlan_cfg.accept_tag2 = true;
8091 vport->txvlan_cfg.accept_untag2 = true;
8092 vport->txvlan_cfg.insert_tag2_en = false;
8093 vport->txvlan_cfg.default_tag2 = 0;
8095 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8096 vport->rxvlan_cfg.strip_tag1_en = false;
8097 vport->rxvlan_cfg.strip_tag2_en =
8098 vport->rxvlan_cfg.rx_vlan_offload_en;
8100 vport->rxvlan_cfg.strip_tag1_en =
8101 vport->rxvlan_cfg.rx_vlan_offload_en;
8102 vport->rxvlan_cfg.strip_tag2_en = true;
8104 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8105 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8107 ret = hclge_set_vlan_tx_offload_cfg(vport);
8111 return hclge_set_vlan_rx_offload_cfg(vport);
8114 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
8116 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
8117 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
8118 struct hclge_desc desc;
8121 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
8122 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
8123 rx_req->ot_fst_vlan_type =
8124 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
8125 rx_req->ot_sec_vlan_type =
8126 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
8127 rx_req->in_fst_vlan_type =
8128 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
8129 rx_req->in_sec_vlan_type =
8130 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
8132 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8134 dev_err(&hdev->pdev->dev,
8135 "Send rxvlan protocol type command fail, ret =%d\n",
8140 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
8142 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
8143 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
8144 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
8146 status = hclge_cmd_send(&hdev->hw, &desc, 1);
8148 dev_err(&hdev->pdev->dev,
8149 "Send txvlan protocol type command fail, ret =%d\n",
8155 static int hclge_init_vlan_config(struct hclge_dev *hdev)
8157 #define HCLGE_DEF_VLAN_TYPE 0x8100
8159 struct hnae3_handle *handle = &hdev->vport[0].nic;
8160 struct hclge_vport *vport;
8164 if (hdev->pdev->revision >= 0x21) {
8165 /* for revision 0x21, vf vlan filter is per function */
8166 for (i = 0; i < hdev->num_alloc_vport; i++) {
8167 vport = &hdev->vport[i];
8168 ret = hclge_set_vlan_filter_ctrl(hdev,
8169 HCLGE_FILTER_TYPE_VF,
8170 HCLGE_FILTER_FE_EGRESS,
8177 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
8178 HCLGE_FILTER_FE_INGRESS, true,
8183 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
8184 HCLGE_FILTER_FE_EGRESS_V1_B,
8190 handle->netdev_flags |= HNAE3_VLAN_FLTR;
8192 hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8193 hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8194 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
8195 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
8196 hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
8197 hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
8199 ret = hclge_set_vlan_protocol_type(hdev);
8203 for (i = 0; i < hdev->num_alloc_vport; i++) {
8206 vport = &hdev->vport[i];
8207 vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8209 ret = hclge_vlan_offload_cfg(vport,
8210 vport->port_base_vlan_cfg.state,
8216 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
8219 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8222 struct hclge_vport_vlan_cfg *vlan;
8224 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
8228 vlan->hd_tbl_status = writen_to_tbl;
8229 vlan->vlan_id = vlan_id;
8231 list_add_tail(&vlan->node, &vport->vlan_list);
8234 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
8236 struct hclge_vport_vlan_cfg *vlan, *tmp;
8237 struct hclge_dev *hdev = vport->back;
8240 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8241 if (!vlan->hd_tbl_status) {
8242 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8244 vlan->vlan_id, false);
8246 dev_err(&hdev->pdev->dev,
8247 "restore vport vlan list failed, ret=%d\n",
8252 vlan->hd_tbl_status = true;
8258 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
8261 struct hclge_vport_vlan_cfg *vlan, *tmp;
8262 struct hclge_dev *hdev = vport->back;
8264 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8265 if (vlan->vlan_id == vlan_id) {
8266 if (is_write_tbl && vlan->hd_tbl_status)
8267 hclge_set_vlan_filter_hw(hdev,
8273 list_del(&vlan->node);
8280 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
8282 struct hclge_vport_vlan_cfg *vlan, *tmp;
8283 struct hclge_dev *hdev = vport->back;
8285 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8286 if (vlan->hd_tbl_status)
8287 hclge_set_vlan_filter_hw(hdev,
8293 vlan->hd_tbl_status = false;
8295 list_del(&vlan->node);
8301 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
8303 struct hclge_vport_vlan_cfg *vlan, *tmp;
8304 struct hclge_vport *vport;
8307 mutex_lock(&hdev->vport_cfg_mutex);
8308 for (i = 0; i < hdev->num_alloc_vport; i++) {
8309 vport = &hdev->vport[i];
8310 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8311 list_del(&vlan->node);
8315 mutex_unlock(&hdev->vport_cfg_mutex);
8318 static void hclge_restore_vlan_table(struct hnae3_handle *handle)
8320 struct hclge_vport *vport = hclge_get_vport(handle);
8321 struct hclge_vport_vlan_cfg *vlan, *tmp;
8322 struct hclge_dev *hdev = vport->back;
8327 mutex_lock(&hdev->vport_cfg_mutex);
8328 for (i = 0; i < hdev->num_alloc_vport; i++) {
8329 vport = &hdev->vport[i];
8330 vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
8331 vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
8332 state = vport->port_base_vlan_cfg.state;
8334 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
8335 hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
8336 vport->vport_id, vlan_id,
8341 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
8344 if (!vlan->hd_tbl_status)
8346 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8348 vlan->vlan_id, false);
8354 mutex_unlock(&hdev->vport_cfg_mutex);
8357 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
8359 struct hclge_vport *vport = hclge_get_vport(handle);
8361 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8362 vport->rxvlan_cfg.strip_tag1_en = false;
8363 vport->rxvlan_cfg.strip_tag2_en = enable;
8365 vport->rxvlan_cfg.strip_tag1_en = enable;
8366 vport->rxvlan_cfg.strip_tag2_en = true;
8368 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
8369 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
8370 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
8372 return hclge_set_vlan_rx_offload_cfg(vport);
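/* When enabling port based VLAN, drop the vport's software vlan entries
 * from hardware and install the new port VLAN; when disabling it, remove
 * the old port VLAN and restore the entries from the vport vlan list.
 */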
8375 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
8376 u16 port_base_vlan_state,
8377 struct hclge_vlan_info *new_info,
8378 struct hclge_vlan_info *old_info)
8380 struct hclge_dev *hdev = vport->back;
8383 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
8384 hclge_rm_vport_all_vlan_table(vport, false);
8385 return hclge_set_vlan_filter_hw(hdev,
8386 htons(new_info->vlan_proto),
8392 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
8393 vport->vport_id, old_info->vlan_tag,
8398 return hclge_add_vport_all_vlan_table(vport);
8401 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
8402 struct hclge_vlan_info *vlan_info)
8404 struct hnae3_handle *nic = &vport->nic;
8405 struct hclge_vlan_info *old_vlan_info;
8406 struct hclge_dev *hdev = vport->back;
8409 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
8411 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
8415 if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
8416 /* add new VLAN tag */
8417 ret = hclge_set_vlan_filter_hw(hdev,
8418 htons(vlan_info->vlan_proto),
8420 vlan_info->vlan_tag,
8425 /* remove old VLAN tag */
8426 ret = hclge_set_vlan_filter_hw(hdev,
8427 htons(old_vlan_info->vlan_proto),
8429 old_vlan_info->vlan_tag,
8437 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
8442 /* update state only when disabling/enabling port based VLAN */
8443 vport->port_base_vlan_cfg.state = state;
8444 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
8445 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
8447 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
8450 vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
8451 vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
8452 vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
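/* Decide how the port based VLAN state should change for the requested
 * vlan: enable, disable, modify, or no change when the configuration is
 * already in effect.
 */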
8457 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
8458 enum hnae3_port_base_vlan_state state,
8461 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8463 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8465 return HNAE3_PORT_BASE_VLAN_ENABLE;
8468 return HNAE3_PORT_BASE_VLAN_DISABLE;
8469 else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
8470 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
8472 return HNAE3_PORT_BASE_VLAN_MODIFY;
8476 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
8477 u16 vlan, u8 qos, __be16 proto)
8479 struct hclge_vport *vport = hclge_get_vport(handle);
8480 struct hclge_dev *hdev = vport->back;
8481 struct hclge_vlan_info vlan_info;
8485 if (hdev->pdev->revision == 0x20)
8488 vport = hclge_get_vf_vport(hdev, vfid);
8492 /* qos is a 3-bit value, so it cannot be bigger than 7 */
8493 if (vlan > VLAN_N_VID - 1 || qos > 7)
8495 if (proto != htons(ETH_P_8021Q))
8496 return -EPROTONOSUPPORT;
8498 state = hclge_get_port_base_vlan_state(vport,
8499 vport->port_base_vlan_cfg.state,
8501 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
8504 vlan_info.vlan_tag = vlan;
8505 vlan_info.qos = qos;
8506 vlan_info.vlan_proto = ntohs(proto);
8508 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8509 return hclge_update_port_base_vlan_cfg(vport, state,
8512 ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
8513 vport->vport_id, state,
8520 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
8521 u16 vlan_id, bool is_kill)
8523 struct hclge_vport *vport = hclge_get_vport(handle);
8524 struct hclge_dev *hdev = vport->back;
8525 bool writen_to_tbl = false;
8528 /* When the device is resetting, the firmware is unable to handle the
8529 * mailbox. Just record the vlan id, and remove it after
8532 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
8533 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8537 /* When port based vlan is enabled, we use the port based vlan as the
8538 * vlan filter entry. In this case, we don't update the vlan filter table
8539 * when the user adds a new vlan or removes an existing vlan; we just
8540 * update the vport vlan list. The vlan ids in the vlan list are only
8541 * written to the vlan filter table once port based vlan is disabled
8543 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
8544 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
8546 writen_to_tbl = true;
8551 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8553 hclge_add_vport_vlan_table(vport, vlan_id,
8555 } else if (is_kill) {
8556 /* when removing the hw vlan filter failed, record the vlan id,
8557 * and try to remove it from hw later, to be consistent
8560 set_bit(vlan_id, vport->vlan_del_fail_bmap);
8565 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
8567 #define HCLGE_MAX_SYNC_COUNT 60
8569 int i, ret, sync_cnt = 0;
8572 /* start from vport 1 since the PF is always alive */
8573 for (i = 0; i < hdev->num_alloc_vport; i++) {
8574 struct hclge_vport *vport = &hdev->vport[i];
8576 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8578 while (vlan_id != VLAN_N_VID) {
8579 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
8580 vport->vport_id, vlan_id,
8582 if (ret && ret != -EINVAL)
8585 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
8586 hclge_rm_vport_vlan_table(vport, vlan_id, false);
8589 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
8592 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
8598 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
8600 struct hclge_config_max_frm_size_cmd *req;
8601 struct hclge_desc desc;
8603 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
8605 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
8606 req->max_frm_size = cpu_to_le16(new_mps);
8607 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
8609 return hclge_cmd_send(&hdev->hw, &desc, 1);
8612 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
8614 struct hclge_vport *vport = hclge_get_vport(handle);
8616 return hclge_set_vport_mtu(vport, new_mtu);
8619 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
8621 struct hclge_dev *hdev = vport->back;
8622 int i, max_frm_size, ret;
8624 /* HW supports 2 layers of vlan */
8625 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
8626 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
8627 max_frm_size > HCLGE_MAC_MAX_FRAME)
8630 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
8631 mutex_lock(&hdev->vport_lock);
8632 /* VF's mps must fit within hdev->mps */
8633 if (vport->vport_id && max_frm_size > hdev->mps) {
8634 mutex_unlock(&hdev->vport_lock);
8636 } else if (vport->vport_id) {
8637 vport->mps = max_frm_size;
8638 mutex_unlock(&hdev->vport_lock);
8642 /* PF's mps must be greater than the VF's mps */
8643 for (i = 1; i < hdev->num_alloc_vport; i++)
8644 if (max_frm_size < hdev->vport[i].mps) {
8645 mutex_unlock(&hdev->vport_lock);
8649 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
8651 ret = hclge_set_mac_mtu(hdev, max_frm_size);
8653 dev_err(&hdev->pdev->dev,
8654 "Change mtu fail, ret =%d\n", ret);
8658 hdev->mps = max_frm_size;
8659 vport->mps = max_frm_size;
8661 ret = hclge_buffer_alloc(hdev);
8663 dev_err(&hdev->pdev->dev,
8664 "Allocate buffer fail, ret =%d\n", ret);
8667 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
8668 mutex_unlock(&hdev->vport_lock);
8672 static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
8675 struct hclge_reset_tqp_queue_cmd *req;
8676 struct hclge_desc desc;
8679 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
8681 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8682 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8684 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
8686 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8688 dev_err(&hdev->pdev->dev,
8689 "Send tqp reset cmd error, status =%d\n", ret);
8696 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
8698 struct hclge_reset_tqp_queue_cmd *req;
8699 struct hclge_desc desc;
8702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
8704 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
8705 req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
8707 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8709 dev_err(&hdev->pdev->dev,
8710 "Get reset status error, status =%d\n", ret);
8714 return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
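/* Convert a queue id that is local to this handle into the global TQP
 * index used by the hardware queue reset commands.
 */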
8717 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
8719 struct hnae3_queue *queue;
8720 struct hclge_tqp *tqp;
8722 queue = handle->kinfo.tqp[queue_id];
8723 tqp = container_of(queue, struct hclge_tqp, q);
8728 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
8730 struct hclge_vport *vport = hclge_get_vport(handle);
8731 struct hclge_dev *hdev = vport->back;
8732 int reset_try_times = 0;
8737 queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
8739 ret = hclge_tqp_enable(hdev, queue_id, 0, false);
8741 dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
8745 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8747 dev_err(&hdev->pdev->dev,
8748 "Send reset tqp cmd fail, ret = %d\n", ret);
8752 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8753 reset_status = hclge_get_reset_status(hdev, queue_gid);
8757 /* Wait for tqp hw reset */
8758 usleep_range(1000, 1200);
8761 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8762 dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
8766 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8768 dev_err(&hdev->pdev->dev,
8769 "Deassert the soft reset fail, ret = %d\n", ret);
8774 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
8776 struct hclge_dev *hdev = vport->back;
8777 int reset_try_times = 0;
8782 queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
8784 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
8786 dev_warn(&hdev->pdev->dev,
8787 "Send reset tqp cmd fail, ret = %d\n", ret);
8791 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
8792 reset_status = hclge_get_reset_status(hdev, queue_gid);
8796 /* Wait for tqp hw reset */
8797 usleep_range(1000, 1200);
8800 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
8801 dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
8805 ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
8807 dev_warn(&hdev->pdev->dev,
8808 "Deassert the soft reset fail, ret = %d\n", ret);
8811 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
8813 struct hclge_vport *vport = hclge_get_vport(handle);
8814 struct hclge_dev *hdev = vport->back;
8816 return hdev->fw_version;
8819 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8821 struct phy_device *phydev = hdev->hw.mac.phydev;
8826 phy_set_asym_pause(phydev, rx_en, tx_en);
8829 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
8833 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
8836 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
8838 dev_err(&hdev->pdev->dev,
8839 "configure pauseparam error, ret = %d.\n", ret);
8844 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
8846 struct phy_device *phydev = hdev->hw.mac.phydev;
8847 u16 remote_advertising = 0;
8848 u16 local_advertising;
8849 u32 rx_pause, tx_pause;
8852 if (!phydev->link || !phydev->autoneg)
8855 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
8858 remote_advertising = LPA_PAUSE_CAP;
8860 if (phydev->asym_pause)
8861 remote_advertising |= LPA_PAUSE_ASYM;
8863 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
8864 remote_advertising);
8865 tx_pause = flowctl & FLOW_CTRL_TX;
8866 rx_pause = flowctl & FLOW_CTRL_RX;
8868 if (phydev->duplex == HCLGE_MAC_HALF) {
8873 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
8876 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
8877 u32 *rx_en, u32 *tx_en)
8879 struct hclge_vport *vport = hclge_get_vport(handle);
8880 struct hclge_dev *hdev = vport->back;
8881 struct phy_device *phydev = hdev->hw.mac.phydev;
8883 *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
8885 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8891 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
8894 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
8897 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
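/* Record the rx/tx pause setting requested by the user as the last flow
 * control mode and make it the current mode.
 */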
8906 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
8907 u32 rx_en, u32 tx_en)
8910 hdev->fc_mode_last_time = HCLGE_FC_FULL;
8911 else if (rx_en && !tx_en)
8912 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
8913 else if (!rx_en && tx_en)
8914 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
8916 hdev->fc_mode_last_time = HCLGE_FC_NONE;
8918 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
8921 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
8922 u32 rx_en, u32 tx_en)
8924 struct hclge_vport *vport = hclge_get_vport(handle);
8925 struct hclge_dev *hdev = vport->back;
8926 struct phy_device *phydev = hdev->hw.mac.phydev;
8930 fc_autoneg = hclge_get_autoneg(handle);
8931 if (auto_neg != fc_autoneg) {
8932 dev_info(&hdev->pdev->dev,
8933 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
8938 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
8939 dev_info(&hdev->pdev->dev,
8940 "Priority flow control enabled. Cannot set link flow control.\n");
8944 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
8946 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
8949 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
8952 return phy_start_aneg(phydev);
8957 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
8958 u8 *auto_neg, u32 *speed, u8 *duplex)
8960 struct hclge_vport *vport = hclge_get_vport(handle);
8961 struct hclge_dev *hdev = vport->back;
8964 *speed = hdev->hw.mac.speed;
8966 *duplex = hdev->hw.mac.duplex;
8968 *auto_neg = hdev->hw.mac.autoneg;
8971 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
8974 struct hclge_vport *vport = hclge_get_vport(handle);
8975 struct hclge_dev *hdev = vport->back;
8978 *media_type = hdev->hw.mac.media_type;
8981 *module_type = hdev->hw.mac.module_type;
8984 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
8985 u8 *tp_mdix_ctrl, u8 *tp_mdix)
8987 struct hclge_vport *vport = hclge_get_vport(handle);
8988 struct hclge_dev *hdev = vport->back;
8989 struct phy_device *phydev = hdev->hw.mac.phydev;
8990 int mdix_ctrl, mdix, is_resolved;
8991 unsigned int retval;
8994 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
8995 *tp_mdix = ETH_TP_MDI_INVALID;
8999 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
9001 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
9002 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
9003 HCLGE_PHY_MDIX_CTRL_S);
9005 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
9006 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
9007 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
9009 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
9011 switch (mdix_ctrl) {
9013 *tp_mdix_ctrl = ETH_TP_MDI;
9016 *tp_mdix_ctrl = ETH_TP_MDI_X;
9019 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
9022 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
9027 *tp_mdix = ETH_TP_MDI_INVALID;
9029 *tp_mdix = ETH_TP_MDI_X;
9031 *tp_mdix = ETH_TP_MDI;
9034 static void hclge_info_show(struct hclge_dev *hdev)
9036 struct device *dev = &hdev->pdev->dev;
9038 dev_info(dev, "PF info begin:\n");
9040 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
9041 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
9042 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
9043 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
9044 dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport);
9045 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
9046 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
9047 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
9048 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
9049 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
9050 dev_info(dev, "This is %s PF\n",
9051 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
9052 dev_info(dev, "DCB %s\n",
9053 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
9054 dev_info(dev, "MQPRIO %s\n",
9055 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
9057 dev_info(dev, "PF info end.\n");
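/* Initialize the NIC client instance. If a reset started while the client was
 * initializing, roll the instance back so the reset path can redo it; NIC
 * hardware error interrupts are enabled only after the client is up.
 */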
9060 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
9061 struct hclge_vport *vport)
9063 struct hnae3_client *client = vport->nic.client;
9064 struct hclge_dev *hdev = ae_dev->priv;
9065 int rst_cnt = hdev->rst_stats.reset_cnt;
9068 ret = client->ops->init_instance(&vport->nic);
9072 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9073 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9074 rst_cnt != hdev->rst_stats.reset_cnt) {
9079 /* Enable nic hw error interrupts */
9080 ret = hclge_config_nic_hw_error(hdev, true);
9082 dev_err(&ae_dev->pdev->dev,
9083 "fail(%d) to enable hw error interrupts\n", ret);
9087 hnae3_set_client_init_flag(client, ae_dev, 1);
9089 if (netif_msg_drv(&hdev->vport->nic))
9090 hclge_info_show(hdev);
9095 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9096 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9097 msleep(HCLGE_WAIT_RESET_DONE);
9099 client->ops->uninit_instance(&vport->nic, 0);
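/* Initialize the RoCE client instance on a RoCE-capable device once the RoCE
 * client has registered, mirroring the NIC path: roll back if a reset raced
 * with the init and enable the RoCE RAS interrupts afterwards.
 */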
9104 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
9105 struct hclge_vport *vport)
9107 struct hnae3_client *client = vport->roce.client;
9108 struct hclge_dev *hdev = ae_dev->priv;
9112 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
9116 client = hdev->roce_client;
9117 ret = hclge_init_roce_base_info(vport);
9121 rst_cnt = hdev->rst_stats.reset_cnt;
9122 ret = client->ops->init_instance(&vport->roce);
9126 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9127 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
9128 rst_cnt != hdev->rst_stats.reset_cnt) {
9133 /* Enable roce ras interrupts */
9134 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9136 dev_err(&ae_dev->pdev->dev,
9137 "fail(%d) to enable roce ras interrupts\n", ret);
9141 hnae3_set_client_init_flag(client, ae_dev, 1);
9146 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9147 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9148 msleep(HCLGE_WAIT_RESET_DONE);
9150 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
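/* Framework callback invoked when a client (KNIC or RoCE) registers: walk the
 * PF and VMDq vports and bring up the matching client instance on each.
 */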
9155 static int hclge_init_client_instance(struct hnae3_client *client,
9156 struct hnae3_ae_dev *ae_dev)
9158 struct hclge_dev *hdev = ae_dev->priv;
9159 struct hclge_vport *vport;
9162 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9163 vport = &hdev->vport[i];
9165 switch (client->type) {
9166 case HNAE3_CLIENT_KNIC:
9167 hdev->nic_client = client;
9168 vport->nic.client = client;
9169 ret = hclge_init_nic_client_instance(ae_dev, vport);
9173 ret = hclge_init_roce_client_instance(ae_dev, vport);
9178 case HNAE3_CLIENT_ROCE:
9179 if (hnae3_dev_roce_supported(hdev)) {
9180 hdev->roce_client = client;
9181 vport->roce.client = client;
9184 ret = hclge_init_roce_client_instance(ae_dev, vport);
9197 hdev->nic_client = NULL;
9198 vport->nic.client = NULL;
9201 hdev->roce_client = NULL;
9202 vport->roce.client = NULL;
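/* Mirror of the init path: tear down the RoCE instance first and then the NIC
 * instance, waiting for any reset in progress to finish before calling the
 * client's uninit_instance hook.
 */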
9206 static void hclge_uninit_client_instance(struct hnae3_client *client,
9207 struct hnae3_ae_dev *ae_dev)
9209 struct hclge_dev *hdev = ae_dev->priv;
9210 struct hclge_vport *vport;
9213 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
9214 vport = &hdev->vport[i];
9215 if (hdev->roce_client) {
9216 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
9217 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9218 msleep(HCLGE_WAIT_RESET_DONE);
9220 hdev->roce_client->ops->uninit_instance(&vport->roce,
9222 hdev->roce_client = NULL;
9223 vport->roce.client = NULL;
9225 if (client->type == HNAE3_CLIENT_ROCE)
9227 if (hdev->nic_client && client->ops->uninit_instance) {
9228 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
9229 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
9230 msleep(HCLGE_WAIT_RESET_DONE);
9232 client->ops->uninit_instance(&vport->nic, 0);
9233 hdev->nic_client = NULL;
9234 vport->nic.client = NULL;
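/* PCI bring-up: enable the device, prefer a 64-bit DMA mask with a 32-bit
 * fallback, claim the regions, map BAR 2 as the register space and read the
 * number of VFs supported via SR-IOV.
 */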
9239 static int hclge_pci_init(struct hclge_dev *hdev)
9241 struct pci_dev *pdev = hdev->pdev;
9242 struct hclge_hw *hw;
9245 ret = pci_enable_device(pdev);
9247 dev_err(&pdev->dev, "failed to enable PCI device\n");
9251 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9253 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9256 "can't set consistent PCI DMA");
9257 goto err_disable_device;
9259 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
9262 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
9264 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
9265 goto err_disable_device;
9268 pci_set_master(pdev);
9270 hw->io_base = pcim_iomap(pdev, 2, 0);
9272 dev_err(&pdev->dev, "Can't map configuration register space\n");
9274 goto err_clr_master;
9277 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
9281 pci_clear_master(pdev);
9282 pci_release_regions(pdev);
9284 pci_disable_device(pdev);
9289 static void hclge_pci_uninit(struct hclge_dev *hdev)
9291 struct pci_dev *pdev = hdev->pdev;
9293 pcim_iounmap(pdev, hdev->hw.io_base);
9294 pci_free_irq_vectors(pdev);
9295 pci_clear_master(pdev);
9296 pci_release_mem_regions(pdev);
9297 pci_disable_device(pdev);
9300 static void hclge_state_init(struct hclge_dev *hdev)
9302 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
9303 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9304 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
9305 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
9306 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
9307 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
9310 static void hclge_state_uninit(struct hclge_dev *hdev)
9312 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9313 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
9315 if (hdev->reset_timer.function)
9316 del_timer_sync(&hdev->reset_timer);
9317 if (hdev->service_task.work.func)
9318 cancel_delayed_work_sync(&hdev->service_task);
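/* FLR preparation: request a function-level reset through the reset framework
 * and poll until the reset task has brought the function down, bounded by
 * HCLGE_FLR_WAIT_CNT iterations of HCLGE_FLR_WAIT_MS.
 */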
9321 static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
9323 #define HCLGE_FLR_WAIT_MS 100
9324 #define HCLGE_FLR_WAIT_CNT 50
9325 struct hclge_dev *hdev = ae_dev->priv;
9328 clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
9329 clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9330 set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
9331 hclge_reset_event(hdev->pdev, NULL);
9333 while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
9334 cnt++ < HCLGE_FLR_WAIT_CNT)
9335 msleep(HCLGE_FLR_WAIT_MS);
9337 if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
9338 dev_err(&hdev->pdev->dev,
9339 "flr wait down timeout: %d\n", cnt);
9342 static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
9344 struct hclge_dev *hdev = ae_dev->priv;
9346 set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
9349 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
9353 for (i = 0; i < hdev->num_alloc_vport; i++) {
9354 struct hclge_vport *vport = &hdev->vport[i];
9357 /* Send cmd to clear VF's FUNC_RST_ING */
9358 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
9360 dev_warn(&hdev->pdev->dev,
9361 "clear vf(%u) rst failed %d!\n",
9362 vport->vport_id, ret);
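/* Main PF initialization path: allocate and populate the hclge_dev, bring up
 * PCI and the firmware command queue, query capabilities, set up MSI-X and
 * the misc vector, allocate TQPs and vports, then initialize MAC, VLAN, TM
 * scheduling, RSS and the flow director before enabling the misc vector and
 * scheduling the periodic service task.
 */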
9366 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
9368 struct pci_dev *pdev = ae_dev->pdev;
9369 struct hclge_dev *hdev;
9372 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
9379 hdev->ae_dev = ae_dev;
9380 hdev->reset_type = HNAE3_NONE_RESET;
9381 hdev->reset_level = HNAE3_FUNC_RESET;
9382 ae_dev->priv = hdev;
9384 /* HW supports 2 layers of VLAN tags */
9385 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
9387 mutex_init(&hdev->vport_lock);
9388 mutex_init(&hdev->vport_cfg_mutex);
9389 spin_lock_init(&hdev->fd_rule_lock);
9391 ret = hclge_pci_init(hdev);
9393 dev_err(&pdev->dev, "PCI init failed\n");
9397 /* Firmware command queue initialize */
9398 ret = hclge_cmd_queue_init(hdev);
9400 dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret);
9401 goto err_pci_uninit;
9404 /* Firmware command initialize */
9405 ret = hclge_cmd_init(hdev);
9407 goto err_cmd_uninit;
9409 ret = hclge_get_cap(hdev);
9411 dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
9413 goto err_cmd_uninit;
9416 ret = hclge_configure(hdev);
9418 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
9419 goto err_cmd_uninit;
9422 ret = hclge_init_msi(hdev);
9424 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
9425 goto err_cmd_uninit;
9428 ret = hclge_misc_irq_init(hdev);
9431 "Misc IRQ(vector0) init error, ret = %d.\n",
9433 goto err_msi_uninit;
9436 ret = hclge_alloc_tqps(hdev);
9438 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
9439 goto err_msi_irq_uninit;
9442 ret = hclge_alloc_vport(hdev);
9444 dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret);
9445 goto err_msi_irq_uninit;
9448 ret = hclge_map_tqp(hdev);
9450 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9451 goto err_msi_irq_uninit;
9454 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
9455 ret = hclge_mac_mdio_config(hdev);
9457 dev_err(&hdev->pdev->dev,
9458 "mdio config fail ret=%d\n", ret);
9459 goto err_msi_irq_uninit;
9463 ret = hclge_init_umv_space(hdev);
9465 dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret);
9466 goto err_mdiobus_unreg;
9469 ret = hclge_mac_init(hdev);
9471 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9472 goto err_mdiobus_unreg;
9475 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9477 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9478 goto err_mdiobus_unreg;
9481 ret = hclge_config_gro(hdev, true);
9483 goto err_mdiobus_unreg;
9485 ret = hclge_init_vlan_config(hdev);
9487 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9488 goto err_mdiobus_unreg;
9491 ret = hclge_tm_schd_init(hdev);
9493 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
9494 goto err_mdiobus_unreg;
9497 hclge_rss_init_cfg(hdev);
9498 ret = hclge_rss_init_hw(hdev);
9500 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9501 goto err_mdiobus_unreg;
9504 ret = init_mgr_tbl(hdev);
9506 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
9507 goto err_mdiobus_unreg;
9510 ret = hclge_init_fd_config(hdev);
9513 "fd table init fail, ret=%d\n", ret);
9514 goto err_mdiobus_unreg;
9517 INIT_KFIFO(hdev->mac_tnl_log);
9519 hclge_dcb_ops_set(hdev);
9521 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
9522 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
9524 /* Set up affinity after the service timer setup because add_timer_on
9525 * is called in the affinity notify callback.
9527 hclge_misc_affinity_setup(hdev);
9529 hclge_clear_all_event_cause(hdev);
9530 hclge_clear_resetting_state(hdev);
9532 /* Log and clear the hw errors that have already occurred */
9533 hclge_handle_all_hns_hw_errors(ae_dev);
9535 /* request a delayed reset for the error recovery because an immediate
9536 * global reset on a PF would disturb the pending initialization of other PFs
9538 if (ae_dev->hw_err_reset_req) {
9539 enum hnae3_reset_type reset_level;
9541 reset_level = hclge_get_reset_level(ae_dev,
9542 &ae_dev->hw_err_reset_req);
9543 hclge_set_def_reset_request(ae_dev, reset_level);
9544 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
9547 /* Enable MISC vector(vector0) */
9548 hclge_enable_vector(&hdev->misc_vector, true);
9550 hclge_state_init(hdev);
9551 hdev->last_reset_time = jiffies;
9553 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
9556 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
9561 if (hdev->hw.mac.phydev)
9562 mdiobus_unregister(hdev->hw.mac.mdio_bus);
9564 hclge_misc_irq_uninit(hdev);
9566 pci_free_irq_vectors(pdev);
9568 hclge_cmd_uninit(hdev);
9570 pcim_iounmap(pdev, hdev->hw.io_base);
9571 pci_clear_master(pdev);
9572 pci_release_regions(pdev);
9573 pci_disable_device(pdev);
9578 static void hclge_stats_clear(struct hclge_dev *hdev)
9580 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
9583 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9585 return hclge_config_switch_param(hdev, vf, enable,
9586 HCLGE_SWITCH_ANTI_SPOOF_MASK);
9589 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
9591 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9592 HCLGE_FILTER_FE_NIC_INGRESS_B,
9596 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
9600 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
9602 dev_err(&hdev->pdev->dev,
9603 "Set vf %d mac spoof check %s failed, ret=%d\n",
9604 vf, enable ? "on" : "off", ret);
9608 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
9610 dev_err(&hdev->pdev->dev,
9611 "Set vf %d vlan spoof check %s failed, ret=%d\n",
9612 vf, enable ? "on" : "off", ret);
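/* ndo_set_vf_spoofchk backend: spoof checking is not supported on revision
 * 0x20 hardware; warn when the VF's VLAN or MAC (UMV) table is already full,
 * since enabling spoof check may then break its transmission.
 */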
9617 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
9620 struct hclge_vport *vport = hclge_get_vport(handle);
9621 struct hclge_dev *hdev = vport->back;
9622 u32 new_spoofchk = enable ? 1 : 0;
9625 if (hdev->pdev->revision == 0x20)
9628 vport = hclge_get_vf_vport(hdev, vf);
9632 if (vport->vf_info.spoofchk == new_spoofchk)
9635 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
9636 dev_warn(&hdev->pdev->dev,
9637 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
9639 else if (enable && hclge_is_umv_space_full(vport))
9640 dev_warn(&hdev->pdev->dev,
9641 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
9644 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
9648 vport->vf_info.spoofchk = new_spoofchk;
9652 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
9654 struct hclge_vport *vport = hdev->vport;
9658 if (hdev->pdev->revision == 0x20)
9661 /* resume the vf spoof check state after reset */
9662 for (i = 0; i < hdev->num_alloc_vport; i++) {
9663 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
9664 vport->vf_info.spoofchk);
9674 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
9676 struct hclge_vport *vport = hclge_get_vport(handle);
9677 struct hclge_dev *hdev = vport->back;
9678 u32 new_trusted = enable ? 1 : 0;
9682 vport = hclge_get_vf_vport(hdev, vf);
9686 if (vport->vf_info.trusted == new_trusted)
9689 /* Disable promisc mode for VF if it is not trusted any more. */
9690 if (!enable && vport->vf_info.promisc_enable) {
9691 en_bc_pmc = hdev->pdev->revision != 0x20;
9692 ret = hclge_set_vport_promisc_mode(vport, false, false,
9696 vport->vf_info.promisc_enable = 0;
9697 hclge_inform_vf_promisc_info(vport);
9700 vport->vf_info.trusted = new_trusted;
9705 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
9710 /* reset vf rate to default value */
9711 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
9712 struct hclge_vport *vport = &hdev->vport[vf];
9714 vport->vf_info.max_tx_rate = 0;
9715 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
9717 dev_err(&hdev->pdev->dev,
9718 "vf%d failed to reset to default, ret=%d\n",
9719 vf - HCLGE_VF_VPORT_START_NUM, ret);
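/* Only TX rate limiting is supported: min_tx_rate must be 0 and max_tx_rate
 * must not exceed the MAC's maximum speed.
 */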
9723 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
9724 int min_tx_rate, int max_tx_rate)
9726 if (min_tx_rate != 0 ||
9727 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
9728 dev_err(&hdev->pdev->dev,
9729 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
9730 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
9737 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
9738 int min_tx_rate, int max_tx_rate, bool force)
9740 struct hclge_vport *vport = hclge_get_vport(handle);
9741 struct hclge_dev *hdev = vport->back;
9744 ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
9748 vport = hclge_get_vf_vport(hdev, vf);
9752 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
9755 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
9759 vport->vf_info.max_tx_rate = max_tx_rate;
9764 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
9766 struct hnae3_handle *handle = &hdev->vport->nic;
9767 struct hclge_vport *vport;
9771 /* resume the vf max_tx_rate after reset */
9772 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
9773 vport = hclge_get_vf_vport(hdev, vf);
9777 /* zero means max rate; after a reset the firmware has already set it
9778 * to max rate, so just continue.
9780 if (!vport->vf_info.max_tx_rate)
9783 ret = hclge_set_vf_rate(handle, vf, 0,
9784 vport->vf_info.max_tx_rate, true);
9786 dev_err(&hdev->pdev->dev,
9787 "vf%d failed to resume tx_rate:%u, ret=%d\n",
9788 vf, vport->vf_info.max_tx_rate, ret);
9796 static void hclge_reset_vport_state(struct hclge_dev *hdev)
9798 struct hclge_vport *vport = hdev->vport;
9801 for (i = 0; i < hdev->num_alloc_vport; i++) {
9802 hclge_vport_stop(vport);
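/* Reinitialize the device after a reset: clear the software statistics and
 * shadow VLAN tables, rerun the hardware init sequence (command queue, TQP
 * mapping, MAC, VLAN, TM, RSS, flow director), re-enable the error interrupts
 * and restore the per-VF spoof-check and rate settings.
 */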
9807 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
9809 struct hclge_dev *hdev = ae_dev->priv;
9810 struct pci_dev *pdev = ae_dev->pdev;
9813 set_bit(HCLGE_STATE_DOWN, &hdev->state);
9815 hclge_stats_clear(hdev);
9816 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
9817 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
9819 ret = hclge_cmd_init(hdev);
9821 dev_err(&pdev->dev, "Cmd queue init failed\n");
9825 ret = hclge_map_tqp(hdev);
9827 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
9831 hclge_reset_umv_space(hdev);
9833 ret = hclge_mac_init(hdev);
9835 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
9839 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
9841 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
9845 ret = hclge_config_gro(hdev, true);
9849 ret = hclge_init_vlan_config(hdev);
9851 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
9855 ret = hclge_tm_init_hw(hdev, true);
9857 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
9861 ret = hclge_rss_init_hw(hdev);
9863 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
9867 ret = hclge_init_fd_config(hdev);
9869 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
9873 /* Log and clear the hw errors that have already occurred */
9874 hclge_handle_all_hns_hw_errors(ae_dev);
9876 /* Re-enable the hw error interrupts because
9877 * the interrupts get disabled on global reset.
9879 ret = hclge_config_nic_hw_error(hdev, true);
9882 "fail(%d) to re-enable NIC hw error interrupts\n",
9887 if (hdev->roce_client) {
9888 ret = hclge_config_rocee_ras_interrupt(hdev, true);
9891 "fail(%d) to re-enable roce ras interrupts\n",
9897 hclge_reset_vport_state(hdev);
9898 ret = hclge_reset_vport_spoofchk(hdev);
9902 ret = hclge_resume_vf_rate(hdev);
9906 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
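/* Tear-down path: reset VF rates, stop the timers and the service task,
 * unregister the MDIO bus, disable the misc vector and all hardware error
 * interrupts, then release the command queue, IRQ and PCI resources.
 */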
9912 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
9914 struct hclge_dev *hdev = ae_dev->priv;
9915 struct hclge_mac *mac = &hdev->hw.mac;
9917 hclge_reset_vf_rate(hdev);
9918 hclge_misc_affinity_teardown(hdev);
9919 hclge_state_uninit(hdev);
9922 mdiobus_unregister(mac->mdio_bus);
9924 hclge_uninit_umv_space(hdev);
9926 /* Disable MISC vector(vector0) */
9927 hclge_enable_vector(&hdev->misc_vector, false);
9928 synchronize_irq(hdev->misc_vector.vector_irq);
9930 /* Disable all hw interrupts */
9931 hclge_config_mac_tnl_int(hdev, false);
9932 hclge_config_nic_hw_error(hdev, false);
9933 hclge_config_rocee_ras_interrupt(hdev, false);
9935 hclge_cmd_uninit(hdev);
9936 hclge_misc_irq_uninit(hdev);
9937 hclge_pci_uninit(hdev);
9938 mutex_destroy(&hdev->vport_lock);
9939 hclge_uninit_vport_mac_table(hdev);
9940 hclge_uninit_vport_vlan_table(hdev);
9941 mutex_destroy(&hdev->vport_cfg_mutex);
9942 ae_dev->priv = NULL;
9945 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
9947 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
9948 struct hclge_vport *vport = hclge_get_vport(handle);
9949 struct hclge_dev *hdev = vport->back;
9951 return min_t(u32, hdev->rss_size_max,
9952 vport->alloc_tqps / kinfo->num_tc);
9955 static void hclge_get_channels(struct hnae3_handle *handle,
9956 struct ethtool_channels *ch)
9958 ch->max_combined = hclge_get_max_channels(handle);
9959 ch->other_count = 1;
9961 ch->combined_count = handle->kinfo.rss_size;
9964 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
9965 u16 *alloc_tqps, u16 *max_rss_size)
9967 struct hclge_vport *vport = hclge_get_vport(handle);
9968 struct hclge_dev *hdev = vport->back;
9970 *alloc_tqps = vport->alloc_tqps;
9971 *max_rss_size = hdev->rss_size_max;
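/* ethtool -L handler: record the requested queue count, remap the vport's
 * TQPs, reprogram the RSS TC mode for the new rss_size and rebuild the RSS
 * indirection table unless the user has configured one explicitly.
 */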
9974 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
9975 bool rxfh_configured)
9977 struct hclge_vport *vport = hclge_get_vport(handle);
9978 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
9979 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
9980 struct hclge_dev *hdev = vport->back;
9981 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
9982 u16 cur_rss_size = kinfo->rss_size;
9983 u16 cur_tqps = kinfo->num_tqps;
9984 u16 tc_valid[HCLGE_MAX_TC_NUM];
9990 kinfo->req_rss_size = new_tqps_num;
9992 ret = hclge_tm_vport_map_update(hdev);
9994 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
9998 roundup_size = roundup_pow_of_two(kinfo->rss_size);
9999 roundup_size = ilog2(roundup_size);
10000 /* Set the RSS TC mode according to the new RSS size */
10001 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
10004 if (!(hdev->hw_tc_map & BIT(i)))
10008 tc_size[i] = roundup_size;
10009 tc_offset[i] = kinfo->rss_size * i;
10011 ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
10015 /* RSS indirection table has been configured by user */
10016 if (rxfh_configured)
10019 /* Reinitialize the RSS indirection table according to the new RSS size */
10020 rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
10024 for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
10025 rss_indir[i] = i % kinfo->rss_size;
10027 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
10029 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
10036 dev_info(&hdev->pdev->dev,
10037 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
10038 cur_rss_size, kinfo->rss_size,
10039 cur_tqps, kinfo->rss_size * kinfo->num_tc);
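/* Ask the firmware how many 32-bit and 64-bit registers are available for
 * dumping; used to size the ethtool register dump.
 */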
10044 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
10045 u32 *regs_num_64_bit)
10047 struct hclge_desc desc;
10051 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
10052 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10054 dev_err(&hdev->pdev->dev,
10055 "Query register number cmd failed, ret = %d.\n", ret);
10059 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
10060 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
10062 total_num = *regs_num_32_bit + *regs_num_64_bit;
10069 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10072 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
10073 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
10075 struct hclge_desc *desc;
10076 u32 *reg_val = data;
10086 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
10087 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
10088 HCLGE_32_BIT_REG_RTN_DATANUM);
10089 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10093 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
10094 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10096 dev_err(&hdev->pdev->dev,
10097 "Query 32 bit register cmd failed, ret = %d.\n", ret);
10102 for (i = 0; i < cmd_num; i++) {
10104 desc_data = (__le32 *)(&desc[i].data[0]);
10105 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
10107 desc_data = (__le32 *)(&desc[i]);
10108 n = HCLGE_32_BIT_REG_RTN_DATANUM;
10110 for (k = 0; k < n; k++) {
10111 *reg_val++ = le32_to_cpu(*desc_data++);
10123 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
10126 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
10127 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
10129 struct hclge_desc *desc;
10130 u64 *reg_val = data;
10140 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
10141 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
10142 HCLGE_64_BIT_REG_RTN_DATANUM);
10143 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
10147 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
10148 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
10150 dev_err(&hdev->pdev->dev,
10151 "Query 64 bit register cmd failed, ret = %d.\n", ret);
10156 for (i = 0; i < cmd_num; i++) {
10158 desc_data = (__le64 *)(&desc[i].data[0]);
10159 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
10161 desc_data = (__le64 *)(&desc[i]);
10162 n = HCLGE_64_BIT_REG_RTN_DATANUM;
10164 for (k = 0; k < n; k++) {
10165 *reg_val++ = le64_to_cpu(*desc_data++);
10177 #define MAX_SEPARATE_NUM 4
10178 #define SEPARATOR_VALUE 0xFDFCFBFA
10179 #define REG_NUM_PER_LINE 4
10180 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
10181 #define REG_SEPARATOR_LINE 1
10182 #define REG_NUM_REMAIN_MASK 3
10183 #define BD_LIST_MAX_NUM 30
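/* Send four chained HCLGE_OPC_DFX_BD_NUM descriptors; the reply reports how
 * many buffer descriptors each DFX register group occupies.
 */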
10185 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
10187 /* prepare 4 commands to query DFX BD number */
10188 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
10189 desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10190 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
10191 desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10192 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
10193 desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10194 hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
10196 return hclge_cmd_send(&hdev->hw, desc, 4);
10199 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
10203 #define HCLGE_DFX_REG_BD_NUM 4
10205 u32 entries_per_desc, desc_index, index, offset, i;
10206 struct hclge_desc desc[HCLGE_DFX_REG_BD_NUM];
10209 ret = hclge_query_bd_num_cmd_send(hdev, desc);
10211 dev_err(&hdev->pdev->dev,
10212 "Get dfx bd num fail, status is %d.\n", ret);
10216 entries_per_desc = ARRAY_SIZE(desc[0].data);
10217 for (i = 0; i < type_num; i++) {
10218 offset = hclge_dfx_bd_offset_list[i];
10219 index = offset % entries_per_desc;
10220 desc_index = offset / entries_per_desc;
10221 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
10227 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
10228 struct hclge_desc *desc_src, int bd_num,
10229 enum hclge_opcode_type cmd)
10231 struct hclge_desc *desc = desc_src;
10234 hclge_cmd_setup_basic_desc(desc, cmd, true);
10235 for (i = 0; i < bd_num - 1; i++) {
10236 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
10238 hclge_cmd_setup_basic_desc(desc, cmd, true);
10242 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
10244 dev_err(&hdev->pdev->dev,
10245 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
10251 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
10254 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
10255 struct hclge_desc *desc = desc_src;
10258 entries_per_desc = ARRAY_SIZE(desc->data);
10259 reg_num = entries_per_desc * bd_num;
10260 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
10261 for (i = 0; i < reg_num; i++) {
10262 index = i % entries_per_desc;
10263 desc_index = i / entries_per_desc;
10264 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
10266 for (i = 0; i < separator_num; i++)
10267 *reg++ = SEPARATOR_VALUE;
10269 return reg_num + separator_num;
10272 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
10274 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10275 int data_len_per_desc, data_len, bd_num, i;
10276 int bd_num_list[BD_LIST_MAX_NUM];
10279 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10281 dev_err(&hdev->pdev->dev,
10282 "Get dfx reg bd num fail, status is %d.\n", ret);
10286 data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data);
10288 for (i = 0; i < dfx_reg_type_num; i++) {
10289 bd_num = bd_num_list[i];
10290 data_len = data_len_per_desc * bd_num;
10291 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
10297 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
10299 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
10300 int bd_num, bd_num_max, buf_len, i;
10301 int bd_num_list[BD_LIST_MAX_NUM];
10302 struct hclge_desc *desc_src;
10306 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
10308 dev_err(&hdev->pdev->dev,
10309 "Get dfx reg bd num fail, status is %d.\n", ret);
10313 bd_num_max = bd_num_list[0];
10314 for (i = 1; i < dfx_reg_type_num; i++)
10315 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
10317 buf_len = sizeof(*desc_src) * bd_num_max;
10318 desc_src = kzalloc(buf_len, GFP_KERNEL);
10320 dev_err(&hdev->pdev->dev, "%s kzalloc failed\n", __func__);
10324 for (i = 0; i < dfx_reg_type_num; i++) {
10325 bd_num = bd_num_list[i];
10326 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
10327 hclge_dfx_reg_opcode_list[i]);
10329 dev_err(&hdev->pdev->dev,
10330 "Get dfx reg fail, status is %d.\n", ret);
10334 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
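/* Read the cmdq, common, per-ring and per-vector register blocks directly
 * from the PF's register space, padding each block with separator words so
 * every block ends on a whole dump line.
 */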
10341 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
10342 struct hnae3_knic_private_info *kinfo)
10344 #define HCLGE_RING_REG_OFFSET 0x200
10345 #define HCLGE_RING_INT_REG_OFFSET 0x4
10347 int i, j, reg_num, separator_num;
10351 /* fetch per-PF register values from the PF's PCIe register space */
10352 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
10353 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10354 for (i = 0; i < reg_num; i++)
10355 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
10356 for (i = 0; i < separator_num; i++)
10357 *reg++ = SEPARATOR_VALUE;
10358 data_num_sum = reg_num + separator_num;
10360 reg_num = ARRAY_SIZE(common_reg_addr_list);
10361 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10362 for (i = 0; i < reg_num; i++)
10363 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
10364 for (i = 0; i < separator_num; i++)
10365 *reg++ = SEPARATOR_VALUE;
10366 data_num_sum += reg_num + separator_num;
10368 reg_num = ARRAY_SIZE(ring_reg_addr_list);
10369 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10370 for (j = 0; j < kinfo->num_tqps; j++) {
10371 for (i = 0; i < reg_num; i++)
10372 *reg++ = hclge_read_dev(&hdev->hw,
10373 ring_reg_addr_list[i] +
10374 HCLGE_RING_REG_OFFSET * j);
10375 for (i = 0; i < separator_num; i++)
10376 *reg++ = SEPARATOR_VALUE;
10378 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
10380 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
10381 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10382 for (j = 0; j < hdev->num_msi_used - 1; j++) {
10383 for (i = 0; i < reg_num; i++)
10384 *reg++ = hclge_read_dev(&hdev->hw,
10385 tqp_intr_reg_addr_list[i] +
10386 HCLGE_RING_INT_REG_OFFSET * j);
10387 for (i = 0; i < separator_num; i++)
10388 *reg++ = SEPARATOR_VALUE;
10390 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
10392 return data_num_sum;
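/* Total length of the ethtool register dump: the directly read register
 * blocks plus the firmware-reported 32-bit/64-bit registers plus the DFX
 * registers, each rounded up to whole REG_LEN_PER_LINE lines.
 */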
10395 static int hclge_get_regs_len(struct hnae3_handle *handle)
10397 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
10398 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10399 struct hclge_vport *vport = hclge_get_vport(handle);
10400 struct hclge_dev *hdev = vport->back;
10401 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
10402 int regs_lines_32_bit, regs_lines_64_bit;
10405 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10407 dev_err(&hdev->pdev->dev,
10408 "Get register number failed, ret = %d.\n", ret);
10412 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
10414 dev_err(&hdev->pdev->dev,
10415 "Get dfx reg len failed, ret = %d.\n", ret);
10419 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
10420 REG_SEPARATOR_LINE;
10421 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
10422 REG_SEPARATOR_LINE;
10423 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
10424 REG_SEPARATOR_LINE;
10425 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
10426 REG_SEPARATOR_LINE;
10427 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
10428 REG_SEPARATOR_LINE;
10429 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
10430 REG_SEPARATOR_LINE;
10432 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
10433 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
10434 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
10437 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
10440 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
10441 struct hclge_vport *vport = hclge_get_vport(handle);
10442 struct hclge_dev *hdev = vport->back;
10443 u32 regs_num_32_bit, regs_num_64_bit;
10444 int i, reg_num, separator_num, ret;
10447 *version = hdev->fw_version;
10449 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
10451 dev_err(&hdev->pdev->dev,
10452 "Get register number failed, ret = %d.\n", ret);
10456 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
10458 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
10460 dev_err(&hdev->pdev->dev,
10461 "Get 32 bit register failed, ret = %d.\n", ret);
10464 reg_num = regs_num_32_bit;
10466 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10467 for (i = 0; i < separator_num; i++)
10468 *reg++ = SEPARATOR_VALUE;
10470 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
10472 dev_err(&hdev->pdev->dev,
10473 "Get 64 bit register failed, ret = %d.\n", ret);
10476 reg_num = regs_num_64_bit * 2;
10478 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
10479 for (i = 0; i < separator_num; i++)
10480 *reg++ = SEPARATOR_VALUE;
10482 ret = hclge_get_dfx_reg(hdev, reg);
10484 dev_err(&hdev->pdev->dev,
10485 "Get dfx register failed, ret = %d.\n", ret);
10488 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
10490 struct hclge_set_led_state_cmd *req;
10491 struct hclge_desc desc;
10494 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
10496 req = (struct hclge_set_led_state_cmd *)desc.data;
10497 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
10498 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
10500 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10502 dev_err(&hdev->pdev->dev,
10503 "Send set led state cmd error, ret =%d\n", ret);
10508 enum hclge_led_status {
10511 HCLGE_LED_NO_CHANGE = 0xFF,
10514 static int hclge_set_led_id(struct hnae3_handle *handle,
10515 enum ethtool_phys_id_state status)
10517 struct hclge_vport *vport = hclge_get_vport(handle);
10518 struct hclge_dev *hdev = vport->back;
10521 case ETHTOOL_ID_ACTIVE:
10522 return hclge_set_led_status(hdev, HCLGE_LED_ON);
10523 case ETHTOOL_ID_INACTIVE:
10524 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
10530 static void hclge_get_link_mode(struct hnae3_handle *handle,
10531 unsigned long *supported,
10532 unsigned long *advertising)
10534 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
10535 struct hclge_vport *vport = hclge_get_vport(handle);
10536 struct hclge_dev *hdev = vport->back;
10537 unsigned int idx = 0;
10539 for (; idx < size; idx++) {
10540 supported[idx] = hdev->hw.mac.supported[idx];
10541 advertising[idx] = hdev->hw.mac.advertising[idx];
10545 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
10547 struct hclge_vport *vport = hclge_get_vport(handle);
10548 struct hclge_dev *hdev = vport->back;
10550 return hclge_config_gro(hdev, enable);
10553 static const struct hnae3_ae_ops hclge_ops = {
10554 .init_ae_dev = hclge_init_ae_dev,
10555 .uninit_ae_dev = hclge_uninit_ae_dev,
10556 .flr_prepare = hclge_flr_prepare,
10557 .flr_done = hclge_flr_done,
10558 .init_client_instance = hclge_init_client_instance,
10559 .uninit_client_instance = hclge_uninit_client_instance,
10560 .map_ring_to_vector = hclge_map_ring_to_vector,
10561 .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
10562 .get_vector = hclge_get_vector,
10563 .put_vector = hclge_put_vector,
10564 .set_promisc_mode = hclge_set_promisc_mode,
10565 .set_loopback = hclge_set_loopback,
10566 .start = hclge_ae_start,
10567 .stop = hclge_ae_stop,
10568 .client_start = hclge_client_start,
10569 .client_stop = hclge_client_stop,
10570 .get_status = hclge_get_status,
10571 .get_ksettings_an_result = hclge_get_ksettings_an_result,
10572 .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
10573 .get_media_type = hclge_get_media_type,
10574 .check_port_speed = hclge_check_port_speed,
10575 .get_fec = hclge_get_fec,
10576 .set_fec = hclge_set_fec,
10577 .get_rss_key_size = hclge_get_rss_key_size,
10578 .get_rss_indir_size = hclge_get_rss_indir_size,
10579 .get_rss = hclge_get_rss,
10580 .set_rss = hclge_set_rss,
10581 .set_rss_tuple = hclge_set_rss_tuple,
10582 .get_rss_tuple = hclge_get_rss_tuple,
10583 .get_tc_size = hclge_get_tc_size,
10584 .get_mac_addr = hclge_get_mac_addr,
10585 .set_mac_addr = hclge_set_mac_addr,
10586 .do_ioctl = hclge_do_ioctl,
10587 .add_uc_addr = hclge_add_uc_addr,
10588 .rm_uc_addr = hclge_rm_uc_addr,
10589 .add_mc_addr = hclge_add_mc_addr,
10590 .rm_mc_addr = hclge_rm_mc_addr,
10591 .set_autoneg = hclge_set_autoneg,
10592 .get_autoneg = hclge_get_autoneg,
10593 .restart_autoneg = hclge_restart_autoneg,
10594 .halt_autoneg = hclge_halt_autoneg,
10595 .get_pauseparam = hclge_get_pauseparam,
10596 .set_pauseparam = hclge_set_pauseparam,
10597 .set_mtu = hclge_set_mtu,
10598 .reset_queue = hclge_reset_tqp,
10599 .get_stats = hclge_get_stats,
10600 .get_mac_stats = hclge_get_mac_stat,
10601 .update_stats = hclge_update_stats,
10602 .get_strings = hclge_get_strings,
10603 .get_sset_count = hclge_get_sset_count,
10604 .get_fw_version = hclge_get_fw_version,
10605 .get_mdix_mode = hclge_get_mdix_mode,
10606 .enable_vlan_filter = hclge_enable_vlan_filter,
10607 .set_vlan_filter = hclge_set_vlan_filter,
10608 .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
10609 .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
10610 .reset_event = hclge_reset_event,
10611 .get_reset_level = hclge_get_reset_level,
10612 .set_default_reset_request = hclge_set_def_reset_request,
10613 .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
10614 .set_channels = hclge_set_channels,
10615 .get_channels = hclge_get_channels,
10616 .get_regs_len = hclge_get_regs_len,
10617 .get_regs = hclge_get_regs,
10618 .set_led_id = hclge_set_led_id,
10619 .get_link_mode = hclge_get_link_mode,
10620 .add_fd_entry = hclge_add_fd_entry,
10621 .del_fd_entry = hclge_del_fd_entry,
10622 .del_all_fd_entries = hclge_del_all_fd_entries,
10623 .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
10624 .get_fd_rule_info = hclge_get_fd_rule_info,
10625 .get_fd_all_rules = hclge_get_all_rules,
10626 .restore_fd_rules = hclge_restore_fd_entries,
10627 .enable_fd = hclge_enable_fd,
10628 .add_arfs_entry = hclge_add_fd_entry_by_arfs,
10629 .dbg_run_cmd = hclge_dbg_run_cmd,
10630 .handle_hw_ras_error = hclge_handle_hw_ras_error,
10631 .get_hw_reset_stat = hclge_get_hw_reset_stat,
10632 .ae_dev_resetting = hclge_ae_dev_resetting,
10633 .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
10634 .set_gro_en = hclge_gro_en,
10635 .get_global_queue_id = hclge_covert_handle_qid_global,
10636 .set_timer_task = hclge_set_timer_task,
10637 .mac_connect_phy = hclge_mac_connect_phy,
10638 .mac_disconnect_phy = hclge_mac_disconnect_phy,
10639 .restore_vlan_table = hclge_restore_vlan_table,
10640 .get_vf_config = hclge_get_vf_config,
10641 .set_vf_link_state = hclge_set_vf_link_state,
10642 .set_vf_spoofchk = hclge_set_vf_spoofchk,
10643 .set_vf_trust = hclge_set_vf_trust,
10644 .set_vf_rate = hclge_set_vf_rate,
10645 .set_vf_mac = hclge_set_vf_mac,
10648 static struct hnae3_ae_algo ae_algo = {
10650 .pdev_id_table = ae_algo_pci_tbl,
10653 static int hclge_init(void)
10655 pr_info("%s is initializing\n", HCLGE_NAME);
10657 hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
10659 pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
10663 hnae3_register_ae_algo(&ae_algo);
10668 static void hclge_exit(void)
10670 hnae3_unregister_ae_algo(&ae_algo);
10671 destroy_workqueue(hclge_wq);
10673 module_init(hclge_init);
10674 module_exit(hclge_exit);
10676 MODULE_LICENSE("GPL");
10677 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
10678 MODULE_DESCRIPTION("HCLGE Driver");
10679 MODULE_VERSION(HCLGE_MOD_VERSION);