// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"
#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT	256U
#define HCLGE_BUF_MUL_BY	2
#define HCLGE_BUF_DIV_BY	2
#define NEED_RESERVE_TC_NUM	2
#define BUF_MAX_PERCENT		100
#define BUF_RESERVE_PERCENT	90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET	1
#define HCLGE_DFX_SSU_0_BD_OFFSET	2
#define HCLGE_DFX_SSU_1_BD_OFFSET	3
#define HCLGE_DFX_IGU_BD_OFFSET		4
#define HCLGE_DFX_RPU_0_BD_OFFSET	5
#define HCLGE_DFX_RPU_1_BD_OFFSET	6
#define HCLGE_DFX_NCSI_BD_OFFSET	7
#define HCLGE_DFX_RTC_BD_OFFSET		8
#define HCLGE_DFX_PPP_BD_OFFSET		9
#define HCLGE_DFX_RCB_BD_OFFSET		10
#define HCLGE_DFX_TQP_BD_OFFSET		11
#define HCLGE_DFX_SSU_2_BD_OFFSET	12

#define HCLGE_LINK_STATUS_MS	10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CSQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CSQ_TAIL_REG,
					 HCLGE_COMM_NIC_CSQ_HEAD_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
					 HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
					 HCLGE_COMM_NIC_CRQ_DEPTH_REG,
					 HCLGE_COMM_NIC_CRQ_TAIL_REG,
					 HCLGE_COMM_NIC_CRQ_HEAD_REG,
					 HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
					 HCLGE_COMM_CMDQ_INTR_STS_REG,
					 HCLGE_COMM_CMDQ_INTR_EN_REG,
					 HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
					   HCLGE_PF_OTHER_INT_REG,
					   HCLGE_MISC_RESET_STS_REG,
					   HCLGE_MISC_VECTOR_INT_STS,
					   HCLGE_GLOBAL_RESET_REG,
					   HCLGE_FUN_RST_ING,
					   HCLGE_GBL_RST_STATUS_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
					 HCLGE_RING_RX_ADDR_H_REG,
					 HCLGE_RING_RX_BD_NUM_REG,
					 HCLGE_RING_RX_BD_LENGTH_REG,
					 HCLGE_RING_RX_MERGE_EN_REG,
					 HCLGE_RING_RX_TAIL_REG,
					 HCLGE_RING_RX_HEAD_REG,
					 HCLGE_RING_RX_FBD_NUM_REG,
					 HCLGE_RING_RX_OFFSET_REG,
					 HCLGE_RING_RX_FBD_OFFSET_REG,
					 HCLGE_RING_RX_STASH_REG,
					 HCLGE_RING_RX_BD_ERR_REG,
					 HCLGE_RING_TX_ADDR_L_REG,
					 HCLGE_RING_TX_ADDR_H_REG,
					 HCLGE_RING_TX_BD_NUM_REG,
					 HCLGE_RING_TX_PRIORITY_REG,
					 HCLGE_RING_TX_TC_REG,
					 HCLGE_RING_TX_MERGE_EN_REG,
					 HCLGE_RING_TX_TAIL_REG,
					 HCLGE_RING_TX_HEAD_REG,
					 HCLGE_RING_TX_FBD_NUM_REG,
					 HCLGE_RING_TX_OFFSET_REG,
					 HCLGE_RING_TX_EBD_NUM_REG,
					 HCLGE_RING_TX_EBD_OFFSET_REG,
					 HCLGE_RING_TX_BD_ERR_REG,
					 HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
					     HCLGE_TQP_INTR_GL0_REG,
					     HCLGE_TQP_INTR_GL1_REG,
					     HCLGE_TQP_INTR_GL2_REG,
					     HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const u32 hclge_dfx_bd_offset_list[] = {
	HCLGE_DFX_BIOS_BD_OFFSET,
	HCLGE_DFX_SSU_0_BD_OFFSET,
	HCLGE_DFX_SSU_1_BD_OFFSET,
	HCLGE_DFX_IGU_BD_OFFSET,
	HCLGE_DFX_RPU_0_BD_OFFSET,
	HCLGE_DFX_RPU_1_BD_OFFSET,
	HCLGE_DFX_NCSI_BD_OFFSET,
	HCLGE_DFX_RTC_BD_OFFSET,
	HCLGE_DFX_PPP_BD_OFFSET,
	HCLGE_DFX_RCB_BD_OFFSET,
	HCLGE_DFX_TQP_BD_OFFSET,
	HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
	HCLGE_OPC_DFX_BIOS_COMMON_REG,
	HCLGE_OPC_DFX_SSU_REG_0,
	HCLGE_OPC_DFX_SSU_REG_1,
	HCLGE_OPC_DFX_IGU_EGU_REG,
	HCLGE_OPC_DFX_RPU_REG_0,
	HCLGE_OPC_DFX_RPU_REG_1,
	HCLGE_OPC_DFX_NCSI_REG,
	HCLGE_OPC_DFX_RTC_REG,
	HCLGE_OPC_DFX_PPP_REG,
	HCLGE_OPC_DFX_RCB_REG,
	HCLGE_OPC_DFX_TQP_REG,
	HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue. It posts the
 * prefilled descriptors to firmware, waits for completion and cleans
 * up the queue afterwards.
 */
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}
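/* A minimal usage sketch (illustrative only, following the pattern used
 * throughout this file): set up a descriptor for some opcode, send it and
 * check the return code. The opcode below is just one example.
 *
 *	struct hclge_desc desc;
 *	int ret;
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
 *	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 *	if (ret)
 *		dev_err(&hdev->pdev->dev, "cmd failed, ret = %d\n", ret);
 */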
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by 1.
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}
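/* Worked example (assuming the 32-byte command descriptor layout used by
 * this driver): sizeof(desc) is 21 * 32 = 672 bytes, i.e. 84 u64 slots;
 * subtracting the one slot taken by the first descriptor's header leaves
 * 83 statistics words to accumulate in the loop above.
 */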
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC 4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so it needs to be counted in */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is continuous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs the total number of both valid registers and
	 * reserved registers, but the old firmware only returns the number
	 * of valid registers in device V2. To be compatible with these
	 * devices, the driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}
int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	int count = 0;
	u32 i;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count += 1;

	return count;
}
static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}
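/* hclge_comm_get_stats() behaves like a cursor: it fills in the MAC
 * statistics the device supports and returns the advanced buffer position,
 * so callers can append further stat blocks behind it, as hclge_get_stats()
 * below does:
 *
 *	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
 *				 ARRAY_SIZE(g_mac_stats_string), data);
 *	p = hclge_comm_tqps_get_stats(handle, p);
 */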
static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
	}

	hclge_update_fec_stats(hdev);

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
			       struct net_device_stats *net_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
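/* The HCLGE_STATE_STATISTICS_UPDATING bit above acts as a try-lock:
 * test_and_set_bit() lets exactly one caller through while any concurrent
 * caller simply returns early instead of blocking, and clear_bit() releases
 * it. This keeps overlapping stats requests from hammering the firmware.
 */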
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode supports app loopback
	 * serdes: all mac modes (GE/XGE/LGE/CGE) support serdes loopback
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
					   size, p);
		p = hclge_comm_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle, NULL);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
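/* Vector accounting example (illustrative): if firmware reports 16 NIC
 * vectors and 8 RoCE vectors, num_msi becomes 24, laid out as the NIC
 * vectors first (0..15) followed by the RoCE vectors (16..23), which is
 * what "queued before" in the comment above means.
 */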
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
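/* Example (illustrative): a request for 25G maps to HCLGE_SUPPORT_25G_BIT
 * via speed_bit_map; if the port's speed_ability mask has that bit set the
 * speed is accepted, otherwise -EINVAL is returned.
 */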
static void hclge_update_fec_support(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->supported);
}
static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 link_mode);
}

static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			link_mode);
}

static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 link_mode);
}

static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 link_mode);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	/* If firmware has reported fec_ability, there is no need to derive
	 * it from the speed.
	 */
	if (mac->fec_ability)
		goto out;

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_100G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_200G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_LLRS);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}

out:
	hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for GE ports */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
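/* Worked example (illustrative): for a speed_ability mask with the 10G and
 * 25G bits set, the checks above fall through 200G/100G/50G/40G and return
 * HCLGE_MAC_SPEED_25G, i.e. the highest advertised speed wins; 1G is the
 * fallback when no bit matches at all.
 */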
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
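	/* Example (illustrative): with param[2] = 0x44332211 and a high
	 * field of 0x6655, the loop above stores the bytes 11:22:33:44:55:66,
	 * i.e. the low 32 bits and the high 16 bits are spliced into one
	 * 48-bit MAC address, lowest byte first.
	 */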
	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M holds the exponent of the PF max rss size,
	 * i.e. the actual size is a power of 2 rather than being read out
	 * directly, which is more flexible for future changes and expansions.
	 * A zero exponent does not make sense, so in that case PF and VF
	 * share the same max rss size field: HCLGE_CFG_RSS_SIZE_S.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
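/* The hnae3_get_field() helper used throughout hclge_parse_cfg() extracts a
 * masked-and-shifted bit field. A sketch of the idea (illustrative, not the
 * actual macro definition):
 *
 *	#define GET_FIELD(origin, mask, shift)	(((origin) & (mask)) >> (shift))
 *
 * e.g. with origin = 0x00c40000, mask = 0x00ff0000 and shift = 16 the
 * extracted field value is 0xc4.
 */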
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len should be in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
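/* The loop above shows the multi-descriptor query pattern: every descriptor
 * except the last carries the NEXT flag, so firmware treats the chain as a
 * single request spanning HCLGE_QUERY_DEV_SPECS_BD_NUM buffer descriptors
 * and fills them all in one hclge_cmd_send() call.
 */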
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Currently non-contiguous TCs are not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		/* when device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * demand.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGE_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}
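/* Address layout sketch (illustrative, the actual constants live in
 * hclge_main.h): queues below HCLGE_TQP_MAX_SIZE_DEV_V2 sit in a flat
 * register array at io_base + HCLGE_TQP_REG_OFFSET, while queue i beyond
 * that limit is addressed from a second window starting at
 * HCLGE_TQP_EXT_REG_OFFSET, indexed by (i - HCLGE_TQP_MAX_SIZE_DEV_V2).
 */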
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure a one-to-one mapping between irq and queue by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
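/* Worked example (illustrative): with alloc_tqps = 16, num_tc = 4 and
 * pf_rss_size_max = 64, the first clamp gives rss_size = 16 / 4 = 4; if
 * only num_nic_msi = 9 vectors are available, the second clamp reduces it
 * to (9 - 1) / 4 = 2 so that every queue can still own its own vector.
 */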
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
1802 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1803 struct hclge_vport *vport)
1805 struct hnae3_handle *nic = &vport->nic;
1806 struct hnae3_knic_private_info *kinfo;
1809 kinfo = &nic->kinfo;
1810 for (i = 0; i < vport->alloc_tqps; i++) {
1811 struct hclge_comm_tqp *q =
1812 container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}
1826 static int hclge_map_tqp(struct hclge_dev *hdev)
1828 struct hclge_vport *vport = hdev->vport;
1831 num_vport = hdev->num_req_vfs + 1;
1832 for (i = 0; i < num_vport; i++) {
1835 ret = hclge_map_tqp_to_vport(hdev, vport);
1845 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1847 struct hnae3_handle *nic = &vport->nic;
1848 struct hclge_dev *hdev = vport->back;
1851 nic->pdev = hdev->pdev;
1852 nic->ae_algo = &ae_algo;
1853 nic->numa_node_mask = hdev->numa_node_mask;
1854 nic->kinfo.io_base = hdev->hw.hw.io_base;
	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
1864 static int hclge_alloc_vport(struct hclge_dev *hdev)
1866 struct pci_dev *pdev = hdev->pdev;
1867 struct hclge_vport *vport;
1873 /* We need to alloc a vport for main NIC of PF */
1874 num_vport = hdev->num_req_vfs + 1;
	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}
1882 /* Alloc the same number of TQPs for every vport */
1883 tqp_per_vport = hdev->num_tqps / num_vport;
1884 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
1892 hdev->num_alloc_vport = num_vport;
1894 if (IS_ENABLED(CONFIG_PCI_IOV))
1895 hdev->num_alloc_vfs = hdev->num_req_vfs;
1897 for (i = 0; i < num_vport; i++) {
1899 vport->vport_id = i;
1900 vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1901 vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1902 vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1903 vport->port_base_vlan_cfg.tbl_sta = true;
1904 vport->rxvlan_cfg.rx_vlan_offload_en = true;
1905 vport->req_vlan_fltr_en = true;
1906 INIT_LIST_HEAD(&vport->vlan_list);
1907 INIT_LIST_HEAD(&vport->uc_mac_list);
1908 INIT_LIST_HEAD(&vport->mc_mac_list);
1909 spin_lock_init(&vport->mac_list_lock);
		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}
1928 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1929 struct hclge_pkt_buf_alloc *buf_alloc)
/* TX buffer size is in units of 128 bytes */
1932 #define HCLGE_BUF_SIZE_UNIT_SHIFT 7
1933 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
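/* Encoding sketch (illustrative numbers): a 32 KiB TC buffer is reported
 * to the firmware as (0x8000 >> HCLGE_BUF_SIZE_UNIT_SHIFT) = 0x100 units
 * of 128 bytes, OR'ed with HCLGE_BUF_SIZE_UPDATE_EN_MSK so the firmware
 * applies the new value.
 */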
1934 struct hclge_tx_buff_alloc_cmd *req;
1935 struct hclge_desc desc;
1939 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1941 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1942 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1943 u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1945 req->tx_pkt_buff[i] =
1946 cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1947 HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
1958 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1959 struct hclge_pkt_buf_alloc *buf_alloc)
1961 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1964 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1969 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1974 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1975 if (hdev->hw_tc_map & BIT(i))
/* Get the number of PFC-enabled TCs, which have a private buffer */
1981 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1982 struct hclge_pkt_buf_alloc *buf_alloc)
1984 struct hclge_priv_buf *priv;
1988 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1989 priv = &buf_alloc->priv_buf[i];
1990 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Get the number of PFC-disabled TCs, which have a private buffer */
1999 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
2000 struct hclge_pkt_buf_alloc *buf_alloc)
2002 struct hclge_priv_buf *priv;
2006 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2007 priv = &buf_alloc->priv_buf[i];
2008 if (hdev->hw_tc_map & BIT(i) &&
2009 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
2017 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2019 struct hclge_priv_buf *priv;
2023 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2024 priv = &buf_alloc->priv_buf[i];
2026 rx_priv += priv->buf_size;
2031 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
2033 u32 i, total_tx_size = 0;
2035 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2036 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2038 return total_tx_size;
2041 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2042 struct hclge_pkt_buf_alloc *buf_alloc,
2045 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2046 u32 tc_num = hclge_get_tc_num(hdev);
2047 u32 shared_buf, aligned_mps;
2051 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;
2060 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2061 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2062 HCLGE_BUF_SIZE_UNIT);
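	/* Worked example (illustrative): an mps of 1500 rounds up to
	 * aligned_mps = 1536; with tc_num = 4 this gives shared_buf_tc =
	 * 4 * 1536 + 1536 = 7680 bytes, and shared_std is the larger of
	 * that and shared_buf_min, aligned up to HCLGE_BUF_SIZE_UNIT.
	 */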
2064 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2065 if (rx_all < rx_priv + shared_std)
2068 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2069 buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
			HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}
2081 if (hnae3_dev_dcb_supported(hdev)) {
2082 hi_thrd = shared_buf - hdev->dv_buf_size;
		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		if (tc_num)
			hi_thrd = hi_thrd / tc_num;
2091 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2092 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}
2099 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2100 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2101 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2107 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2108 struct hclge_pkt_buf_alloc *buf_alloc)
2112 total_size = hdev->pkt_buf_size;
2114 /* alloc tx buffer for all enabled tc */
2115 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2116 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2118 if (hdev->hw_tc_map & BIT(i)) {
2119 if (total_size < hdev->tx_buf_size)
2122 priv->tx_buf_size = hdev->tx_buf_size;
2124 priv->tx_buf_size = 0;
2127 total_size -= priv->tx_buf_size;
2133 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2134 struct hclge_pkt_buf_alloc *buf_alloc)
2136 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2140 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2141 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2148 if (!(hdev->hw_tc_map & BIT(i)))
2153 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2154 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2155 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2156 HCLGE_BUF_SIZE_UNIT);
		} else {
			priv->wl.low = 0;
			priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
					aligned_mps;
		}

		priv->buf_size = priv->wl.high + hdev->dv_buf_size;
	}
2166 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2169 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2170 struct hclge_pkt_buf_alloc *buf_alloc)
2172 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2173 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
	/* let the last TC be the first one cleared */
2177 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2178 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2179 unsigned int mask = BIT((unsigned int)i);
2181 if (hdev->hw_tc_map & mask &&
2182 !(hdev->tm_info.hw_pfc_map & mask)) {
2183 /* Clear the no pfc TC private buffer */
2191 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2192 no_pfc_priv_num == 0)
2196 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2199 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2200 struct hclge_pkt_buf_alloc *buf_alloc)
2202 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2203 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
	/* let the last TC be the first one cleared */
2207 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2208 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2209 unsigned int mask = BIT((unsigned int)i);
2211 if (hdev->hw_tc_map & mask &&
2212 hdev->tm_info.hw_pfc_map & mask) {
2213 /* Reduce the number of pfc TC with private buffer */
2221 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2226 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2229 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2230 struct hclge_pkt_buf_alloc *buf_alloc)
2232 #define COMPENSATE_BUFFER 0x3C00
2233 #define COMPENSATE_HALF_MPS_NUM 5
2234 #define PRIV_WL_GAP 0x1800
2236 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2237 u32 tc_num = hclge_get_tc_num(hdev);
2238 u32 half_mps = hdev->mps >> 1;
2243 rx_priv = rx_priv / tc_num;
2245 if (tc_num <= NEED_RESERVE_TC_NUM)
2246 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
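	/* e.g. (illustrative numbers) a 96 KiB per-TC share with tc_num <= 2
	 * keeps 96 KiB * 90 / 100 ~= 86 KiB for private buffers and holds
	 * the remaining 10% in reserve.
	 */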
2248 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2249 COMPENSATE_HALF_MPS_NUM * half_mps;
2250 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2251 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2252 if (rx_priv < min_rx_priv)
2255 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2256 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2263 if (!(hdev->hw_tc_map & BIT(i)))
2267 priv->buf_size = rx_priv;
2268 priv->wl.high = rx_priv - hdev->dv_buf_size;
2269 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
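		/* Illustrative numbers: with rx_priv = 48 KiB per TC and
		 * dv_buf_size = 8 KiB, wl.high lands at 40 KiB and wl.low
		 * sits PRIV_WL_GAP (0x1800 = 6 KiB) below it, at 34 KiB.
		 */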
2272 buf_alloc->s_buf.buf_size = 0;
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
 * @hdev: pointer to struct hclge_dev
 * @buf_alloc: pointer to buffer calculation data
 * @return: 0: calculation successful, negative: failure
 */
2282 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2283 struct hclge_pkt_buf_alloc *buf_alloc)
2285 /* When DCB is not supported, rx private buffer is not allocated. */
2286 if (!hnae3_dev_dcb_supported(hdev)) {
2287 u32 rx_all = hdev->pkt_buf_size;
2289 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2290 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2296 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2299 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2302 /* try to decrease the buffer size */
2303 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2306 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2309 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2315 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2316 struct hclge_pkt_buf_alloc *buf_alloc)
2318 struct hclge_rx_priv_buff_cmd *req;
2319 struct hclge_desc desc;
2323 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2324 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2326 /* Alloc private buffer TCs */
2327 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2328 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
		req->buf_num[i] =
			cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
		req->buf_num[i] |=
			cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
	}

	req->shared_buf =
		cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
			    (1 << HCLGE_TC0_PRI_BUF_EN_B));
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private buffer alloc cmd failed %d\n", ret);
	return ret;
}
2348 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2349 struct hclge_pkt_buf_alloc *buf_alloc)
2351 struct hclge_rx_priv_wl_buf *req;
2352 struct hclge_priv_buf *priv;
2353 struct hclge_desc desc[2];
2357 for (i = 0; i < 2; i++) {
2358 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2360 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2368 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2369 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2371 priv = &buf_alloc->priv_buf[idx];
2372 req->tc_wl[j].high =
2373 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2374 req->tc_wl[j].high |=
2375 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
			req->tc_wl[j].low =
				cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2378 req->tc_wl[j].low |=
2379 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
	/* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"rx private waterline config cmd failed %d\n",
			ret);
	return ret;
}
2392 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2393 struct hclge_pkt_buf_alloc *buf_alloc)
2395 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2396 struct hclge_rx_com_thrd *req;
2397 struct hclge_desc desc[2];
2398 struct hclge_tc_thrd *tc;
2402 for (i = 0; i < 2; i++) {
2403 hclge_cmd_setup_basic_desc(&desc[i],
2404 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2405 req = (struct hclge_rx_com_thrd *)&desc[i].data;
		/* The first descriptor sets the NEXT bit to 1 */
		if (i == 0)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2413 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2414 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2416 req->com_thrd[j].high =
2417 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2418 req->com_thrd[j].high |=
2419 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2420 req->com_thrd[j].low =
2421 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2422 req->com_thrd[j].low |=
2423 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2427 /* Send 2 descriptors at one time */
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common threshold config cmd failed %d\n", ret);
	return ret;
}
2435 static int hclge_common_wl_config(struct hclge_dev *hdev,
2436 struct hclge_pkt_buf_alloc *buf_alloc)
2438 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2439 struct hclge_rx_com_wl *req;
2440 struct hclge_desc desc;
2443 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2445 req = (struct hclge_rx_com_wl *)desc.data;
2446 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2447 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2449 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2450 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"common waterline config cmd failed %d\n", ret);
	return ret;
}
2460 int hclge_buffer_alloc(struct hclge_dev *hdev)
2462 struct hclge_pkt_buf_alloc *pkt_buf;
	pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
	if (!pkt_buf)
		return -ENOMEM;
2469 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2471 dev_err(&hdev->pdev->dev,
2472 "could not calc tx buffer size for all TCs %d\n", ret);
2476 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2478 dev_err(&hdev->pdev->dev,
2479 "could not alloc tx buffers %d\n", ret);
2483 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2485 dev_err(&hdev->pdev->dev,
2486 "could not calc rx priv buffer size for all TCs %d\n",
2491 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2493 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2498 if (hnae3_dev_dcb_supported(hdev)) {
2499 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2501 dev_err(&hdev->pdev->dev,
2502 "could not configure rx private waterline %d\n",
2507 ret = hclge_common_thrd_config(hdev, pkt_buf);
2509 dev_err(&hdev->pdev->dev,
2510 "could not configure common threshold %d\n",
2516 ret = hclge_common_wl_config(hdev, pkt_buf);
2518 dev_err(&hdev->pdev->dev,
2519 "could not configure common waterline %d\n", ret);
2526 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2528 struct hnae3_handle *roce = &vport->roce;
2529 struct hnae3_handle *nic = &vport->nic;
2530 struct hclge_dev *hdev = vport->back;
2532 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2534 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2537 roce->rinfo.base_vector = hdev->num_nic_msi;
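	/* MSI-X layout: the NIC owns vectors [0, num_nic_msi), so the RoCE
	 * block starts immediately after them at index num_nic_msi.
	 */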
2539 roce->rinfo.netdev = nic->kinfo.netdev;
2540 roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2541 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2543 roce->pdev = nic->pdev;
2544 roce->ae_algo = nic->ae_algo;
2545 roce->numa_node_mask = nic->numa_node_mask;
2550 static int hclge_init_msi(struct hclge_dev *hdev)
2552 struct pci_dev *pdev = hdev->pdev;
	vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
					hdev->num_msi,
					PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
2565 if (vectors < hdev->num_msi)
2566 dev_warn(&hdev->pdev->dev,
2567 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2568 hdev->num_msi, vectors);
2570 hdev->num_msi = vectors;
2571 hdev->num_msi_left = vectors;
2573 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2574 sizeof(u16), GFP_KERNEL);
2575 if (!hdev->vector_status) {
2576 pci_free_irq_vectors(pdev);
2580 for (i = 0; i < hdev->num_msi; i++)
2581 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2583 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2584 sizeof(int), GFP_KERNEL);
2585 if (!hdev->vector_irq) {
2586 pci_free_irq_vectors(pdev);
2593 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2595 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2596 duplex = HCLGE_MAC_FULL;
2601 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
2602 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
2603 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
2604 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
2605 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
2606 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
2607 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
2608 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
2609 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
2610 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
2613 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
2617 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
2618 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
2619 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
2627 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2628 u8 duplex, u8 lane_num)
2630 struct hclge_config_mac_speed_dup_cmd *req;
2631 struct hclge_desc desc;
2635 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2637 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2640 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2642 ret = hclge_convert_to_fw_speed(speed, &speed_fw);
2644 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2648 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
2650 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2652 req->lane_num = lane_num;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"mac speed/duplex config cmd failed %d.\n", ret);

	return ret;
}
2664 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
2666 struct hclge_mac *mac = &hdev->hw.mac;
2669 duplex = hclge_check_speed_dup(duplex, speed);
	if (!mac->support_autoneg && mac->speed == speed &&
	    mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
		return 0;
2674 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
2678 hdev->hw.mac.speed = speed;
2679 hdev->hw.mac.duplex = duplex;
2681 hdev->hw.mac.lane_num = lane_num;
2686 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2687 u8 duplex, u8 lane_num)
2689 struct hclge_vport *vport = hclge_get_vport(handle);
2690 struct hclge_dev *hdev = vport->back;
2692 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
2695 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2697 struct hclge_config_auto_neg_cmd *req;
2698 struct hclge_desc desc;
2702 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2704 req = (struct hclge_config_auto_neg_cmd *)desc.data;
	if (enable)
		hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2707 req->cfg_an_cmd_flag = cpu_to_le32(flag);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
			ret);

	return ret;
}
2717 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2719 struct hclge_vport *vport = hclge_get_vport(handle);
2720 struct hclge_dev *hdev = vport->back;
2722 if (!hdev->hw.mac.support_autoneg) {
2724 dev_err(&hdev->pdev->dev,
2725 "autoneg is not supported by current port\n");
2732 return hclge_set_autoneg_en(hdev, enable);
2735 static int hclge_get_autoneg(struct hnae3_handle *handle)
2737 struct hclge_vport *vport = hclge_get_vport(handle);
2738 struct hclge_dev *hdev = vport->back;
2739 struct phy_device *phydev = hdev->hw.mac.phydev;
	if (phydev)
		return phydev->autoneg;

	return hdev->hw.mac.autoneg;
}
2747 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2749 struct hclge_vport *vport = hclge_get_vport(handle);
2750 struct hclge_dev *hdev = vport->back;
2753 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
2761 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2763 struct hclge_vport *vport = hclge_get_vport(handle);
2764 struct hclge_dev *hdev = vport->back;
2766 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2767 return hclge_set_autoneg_en(hdev, !halt);
2772 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
2773 struct hclge_desc *desc, u32 desc_len)
2775 u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
2780 for (i = 0; i < lane_size; i++) {
2781 if (data_index >= HCLGE_DESC_DATA_LEN) {
2786 if (desc_index >= desc_len)
2789 hdev->fec_stats.per_lanes[i] +=
2790 le32_to_cpu(desc[desc_index].data[data_index]);
2795 static void hclge_parse_fec_stats(struct hclge_dev *hdev,
2796 struct hclge_desc *desc, u32 desc_len)
2798 struct hclge_query_fec_stats_cmd *req;
2800 req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
2802 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
2803 hdev->fec_stats.rs_corr_blocks +=
2804 le32_to_cpu(req->rs_fec_corr_blocks);
2805 hdev->fec_stats.rs_uncorr_blocks +=
2806 le32_to_cpu(req->rs_fec_uncorr_blocks);
2807 hdev->fec_stats.rs_error_blocks +=
2808 le32_to_cpu(req->rs_fec_error_blocks);
2809 hdev->fec_stats.base_r_corr_blocks +=
2810 le32_to_cpu(req->base_r_fec_corr_blocks);
2811 hdev->fec_stats.base_r_uncorr_blocks +=
2812 le32_to_cpu(req->base_r_fec_uncorr_blocks);
2814 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
2817 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
2819 struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
2823 for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
2824 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
2826 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
2827 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2830 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
2834 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
2839 static void hclge_update_fec_stats(struct hclge_dev *hdev)
2841 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2844 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
2845 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
2848 ret = hclge_update_fec_stats_hw(hdev);
2850 dev_err(&hdev->pdev->dev,
2851 "failed to update fec stats, ret = %d\n", ret);
2853 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
2856 static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
2857 struct ethtool_fec_stats *fec_stats)
2859 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
2860 fec_stats->uncorrectable_blocks.total =
2861 hdev->fec_stats.rs_uncorr_blocks;
2864 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
2865 struct ethtool_fec_stats *fec_stats)
2869 if (hdev->fec_stats.base_r_lane_num == 0 ||
2870 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
		dev_err(&hdev->pdev->dev,
			"fec stats lane number(%llu) is invalid\n",
			hdev->fec_stats.base_r_lane_num);
		return;
	}
2877 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
2878 fec_stats->corrected_blocks.lanes[i] =
2879 hdev->fec_stats.base_r_corr_per_lanes[i];
2880 fec_stats->uncorrectable_blocks.lanes[i] =
2881 hdev->fec_stats.base_r_uncorr_per_lanes[i];
2885 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
2886 struct ethtool_fec_stats *fec_stats)
2888 u32 fec_mode = hdev->hw.mac.fec_mode;
2891 case BIT(HNAE3_FEC_RS):
2892 case BIT(HNAE3_FEC_LLRS):
2893 hclge_get_fec_stats_total(hdev, fec_stats);
2895 case BIT(HNAE3_FEC_BASER):
2896 hclge_get_fec_stats_lanes(hdev, fec_stats);
2899 dev_err(&hdev->pdev->dev,
2900 "fec stats is not supported by current fec mode(0x%x)\n",
2906 static void hclge_get_fec_stats(struct hnae3_handle *handle,
2907 struct ethtool_fec_stats *fec_stats)
2909 struct hclge_vport *vport = hclge_get_vport(handle);
2910 struct hclge_dev *hdev = vport->back;
2911 u32 fec_mode = hdev->hw.mac.fec_mode;
	if (fec_mode == BIT(HNAE3_FEC_NONE) ||
	    fec_mode == BIT(HNAE3_FEC_AUTO) ||
	    fec_mode == BIT(HNAE3_FEC_USER_DEF))
		return;
2918 hclge_update_fec_stats(hdev);
2920 hclge_comm_get_fec_stats(hdev, fec_stats);
2923 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2925 struct hclge_config_fec_cmd *req;
2926 struct hclge_desc desc;
2929 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2931 req = (struct hclge_config_fec_cmd *)desc.data;
2932 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2933 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2934 if (fec_mode & BIT(HNAE3_FEC_RS))
2935 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2936 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2937 if (fec_mode & BIT(HNAE3_FEC_LLRS))
2938 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2939 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
2940 if (fec_mode & BIT(HNAE3_FEC_BASER))
2941 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2942 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);

	return ret;
}
2951 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2953 struct hclge_vport *vport = hclge_get_vport(handle);
2954 struct hclge_dev *hdev = vport->back;
2955 struct hclge_mac *mac = &hdev->hw.mac;
	if (fec_mode && !(mac->fec_ability & fec_mode)) {
		dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
		return -EINVAL;
	}

	ret = hclge_set_fec_hw(hdev, fec_mode);
	if (ret)
		return ret;

	mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
	return 0;
}
2971 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2974 struct hclge_vport *vport = hclge_get_vport(handle);
2975 struct hclge_dev *hdev = vport->back;
2976 struct hclge_mac *mac = &hdev->hw.mac;
	if (fec_ability)
		*fec_ability = mac->fec_ability;
	if (fec_mode)
		*fec_mode = mac->fec_mode;
}
2984 static int hclge_mac_init(struct hclge_dev *hdev)
2986 struct hclge_mac *mac = &hdev->hw.mac;
2989 hdev->support_sfp_query = true;
2990 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2991 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2992 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
2996 if (hdev->hw.mac.support_autoneg) {
2997 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
3004 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
3005 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
	ret = hclge_set_mac_mtu(hdev, hdev->mps);
	if (ret) {
		dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
		return ret;
	}
3016 ret = hclge_set_default_loopback(hdev);
	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"allocate buffer fail, ret=%d\n", ret);

	return ret;
}
3028 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
3030 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3031 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
3032 hdev->last_mbx_scheduled = jiffies;
3033 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3037 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
3039 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3040 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
3041 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
3042 hdev->last_rst_scheduled = jiffies;
3043 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3047 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
3049 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3050 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
3051 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
3054 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
3056 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
3057 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
3058 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
3061 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
3063 struct hclge_link_status_cmd *req;
3064 struct hclge_desc desc;
3067 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
			ret);
		return ret;
	}
3075 req = (struct hclge_link_status_cmd *)desc.data;
3076 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
3077 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
3082 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
3084 struct phy_device *phydev = hdev->hw.mac.phydev;
3086 *link_status = HCLGE_LINK_STATUS_DOWN;
3088 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
	if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
		return 0;
3094 return hclge_get_mac_link_status(hdev, link_status);
3097 static void hclge_push_link_status(struct hclge_dev *hdev)
3099 struct hclge_vport *vport;
3103 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3104 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
		if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
		    vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
			continue;

		ret = hclge_push_vf_link_status(vport);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"failed to push link status to vf%u, ret = %d\n",
				i, ret);
	}
}
3119 static void hclge_update_link_status(struct hclge_dev *hdev)
3121 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3122 struct hnae3_handle *handle = &hdev->vport[0].nic;
3123 struct hnae3_client *rclient = hdev->roce_client;
3124 struct hnae3_client *client = hdev->nic_client;
3131 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3134 ret = hclge_get_mac_phy_link(hdev, &state);
3136 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3140 if (state != hdev->hw.mac.link) {
3141 hdev->hw.mac.link = state;
3142 client->ops->link_status_change(handle, state);
3143 hclge_config_mac_tnl_int(hdev, state);
3144 if (rclient && rclient->ops->link_status_change)
3145 rclient->ops->link_status_change(rhandle, state);
3147 hclge_push_link_status(hdev);
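		/* a link state change fans out to the NIC client, to the
		 * RoCE client when one is registered, and to the VFs via
		 * mailbox.
		 */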
3150 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3153 static void hclge_update_speed_advertising(struct hclge_mac *mac)
3157 if (hclge_get_speed_bit(mac->speed, &speed_ability))
3160 switch (mac->module_type) {
3161 case HNAE3_MODULE_TYPE_FIBRE_LR:
3162 hclge_convert_setting_lr(speed_ability, mac->advertising);
3164 case HNAE3_MODULE_TYPE_FIBRE_SR:
3165 case HNAE3_MODULE_TYPE_AOC:
3166 hclge_convert_setting_sr(speed_ability, mac->advertising);
3168 case HNAE3_MODULE_TYPE_CR:
3169 hclge_convert_setting_cr(speed_ability, mac->advertising);
3171 case HNAE3_MODULE_TYPE_KR:
3172 hclge_convert_setting_kr(speed_ability, mac->advertising);
3179 static void hclge_update_fec_advertising(struct hclge_mac *mac)
	if (mac->fec_mode & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->advertising);
	else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->advertising);
	else
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->advertising);
}
3195 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3197 struct hclge_mac *mac = &hdev->hw.mac;
3200 switch (hdev->fc_mode_last_time) {
3201 case HCLGE_FC_RX_PAUSE:
3205 case HCLGE_FC_TX_PAUSE:
3219 linkmode_set_pause(mac->advertising, tx_en, rx_en);
3222 static void hclge_update_advertising(struct hclge_dev *hdev)
3224 struct hclge_mac *mac = &hdev->hw.mac;
3226 linkmode_zero(mac->advertising);
3227 hclge_update_speed_advertising(mac);
3228 hclge_update_fec_advertising(mac);
3229 hclge_update_pause_advertising(hdev);
3232 static void hclge_update_port_capability(struct hclge_dev *hdev,
3233 struct hclge_mac *mac)
3235 if (hnae3_dev_fec_supported(hdev))
3236 hclge_convert_setting_fec(mac);
	/* firmware cannot identify backplane type; the media type read
	 * from the configuration helps to handle it
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3242 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3243 mac->module_type = HNAE3_MODULE_TYPE_KR;
3244 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3245 mac->module_type = HNAE3_MODULE_TYPE_TP;
3247 if (mac->support_autoneg) {
3248 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
		linkmode_copy(mac->advertising, mac->supported);
	} else {
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
				   mac->supported);
		hclge_update_advertising(hdev);
	}
}
3257 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3259 struct hclge_sfp_info_cmd *resp;
3260 struct hclge_desc desc;
3263 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3264 resp = (struct hclge_sfp_info_cmd *)desc.data;
3265 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP speed %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
		return ret;
	}
3275 *speed = le32_to_cpu(resp->speed);
3280 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3282 struct hclge_sfp_info_cmd *resp;
3283 struct hclge_desc desc;
3286 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3287 resp = (struct hclge_sfp_info_cmd *)desc.data;
3289 resp->query_type = QUERY_ACTIVE_SPEED;
3291 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "IMP does not support getting SFP info %d\n", ret);
		return ret;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
		return ret;
	}
	/* In some cases the MAC speed fetched from the IMP may be 0; it
	 * should not be written to mac->speed.
	 */
	if (!le32_to_cpu(resp->speed))
		return 0;
3307 mac->speed = le32_to_cpu(resp->speed);
	/* if resp->speed_ability is 0, it means it's an old firmware
	 * version; do not update these params
	 */
	if (resp->speed_ability) {
3312 mac->module_type = le32_to_cpu(resp->module_type);
3313 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3314 mac->autoneg = resp->autoneg;
3315 mac->support_autoneg = resp->autoneg_ability;
3316 mac->speed_type = QUERY_ACTIVE_SPEED;
3317 mac->lane_num = resp->lane_num;
		if (!resp->active_fec)
			mac->fec_mode = 0;
		else
			mac->fec_mode = BIT(resp->active_fec);
		mac->fec_ability = resp->fec_ability;
	} else {
		mac->speed_type = QUERY_SFP_SPEED;
	}

	return 0;
}
3330 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3331 struct ethtool_link_ksettings *cmd)
3333 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3334 struct hclge_vport *vport = hclge_get_vport(handle);
3335 struct hclge_phy_link_ksetting_0_cmd *req0;
3336 struct hclge_phy_link_ksetting_1_cmd *req1;
3337 u32 supported, advertising, lp_advertising;
3338 struct hclge_dev *hdev = vport->back;
3341 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3343 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3344 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get phy link ksetting, ret = %d.\n", ret);
		return ret;
	}
3354 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3355 cmd->base.autoneg = req0->autoneg;
3356 cmd->base.speed = le32_to_cpu(req0->speed);
3357 cmd->base.duplex = req0->duplex;
3358 cmd->base.port = req0->port;
3359 cmd->base.transceiver = req0->transceiver;
3360 cmd->base.phy_address = req0->phy_address;
3361 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3362 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3363 supported = le32_to_cpu(req0->supported);
3364 advertising = le32_to_cpu(req0->advertising);
3365 lp_advertising = le32_to_cpu(req0->lp_advertising);
3366 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3368 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3370 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3373 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3374 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3375 cmd->base.master_slave_state = req1->master_slave_state;
3381 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3382 const struct ethtool_link_ksettings *cmd)
3384 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3385 struct hclge_vport *vport = hclge_get_vport(handle);
3386 struct hclge_phy_link_ksetting_0_cmd *req0;
3387 struct hclge_phy_link_ksetting_1_cmd *req1;
3388 struct hclge_dev *hdev = vport->back;
	if (cmd->base.autoneg == AUTONEG_DISABLE &&
	    ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
	     (cmd->base.duplex != DUPLEX_HALF &&
	      cmd->base.duplex != DUPLEX_FULL)))
		return -EINVAL;
3398 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3400 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3401 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3404 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3405 req0->autoneg = cmd->base.autoneg;
3406 req0->speed = cpu_to_le32(cmd->base.speed);
3407 req0->duplex = cmd->base.duplex;
3408 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3409 cmd->link_modes.advertising);
3410 req0->advertising = cpu_to_le32(advertising);
3411 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3413 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3414 req1->master_slave_cfg = cmd->base.master_slave_cfg;
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set phy link ksettings, ret = %d.\n", ret);
		return ret;
	}
3423 hdev->hw.mac.autoneg = cmd->base.autoneg;
3424 hdev->hw.mac.speed = cmd->base.speed;
3425 hdev->hw.mac.duplex = cmd->base.duplex;
3426 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3431 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3433 struct ethtool_link_ksettings cmd;
	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;

	ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
	if (ret)
		return ret;
3443 hdev->hw.mac.autoneg = cmd.base.autoneg;
3444 hdev->hw.mac.speed = cmd.base.speed;
3445 hdev->hw.mac.duplex = cmd.base.duplex;
3446 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
3451 static int hclge_tp_port_init(struct hclge_dev *hdev)
3453 struct ethtool_link_ksettings cmd;
	if (!hnae3_dev_phy_imp_supported(hdev))
		return 0;
3458 cmd.base.autoneg = hdev->hw.mac.autoneg;
3459 cmd.base.speed = hdev->hw.mac.speed;
3460 cmd.base.duplex = hdev->hw.mac.duplex;
3461 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3463 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3466 static int hclge_update_port_info(struct hclge_dev *hdev)
3468 struct hclge_mac *mac = &hdev->hw.mac;
	/* copper ports are handled via the PHY, so get the port info from
	 * the SFP cmd only for non-copper media
	 */
	if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
		return hclge_update_tp_port_info(hdev);
	/* if IMP does not support getting SFP/qSFP info, return directly */
	if (!hdev->support_sfp_query)
		return 0;
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
		ret = hclge_get_sfp_info(hdev, mac);
	} else {
		speed = HCLGE_MAC_SPEED_UNKNOWN;
		ret = hclge_get_sfp_speed(hdev, &speed);
	}

	if (ret == -EOPNOTSUPP) {
		hdev->support_sfp_query = false;
		return ret;
	} else if (ret) {
		return ret;
	}
3495 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3496 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3497 hclge_update_port_capability(hdev, mac);
3498 if (mac->speed != speed)
3499 (void)hclge_tm_port_shaper_cfg(hdev);
3502 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3503 HCLGE_MAC_FULL, mac->lane_num);
3505 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3506 return 0; /* do nothing if no SFP */
3508 /* must config full duplex for SFP */
3509 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
3513 static int hclge_get_status(struct hnae3_handle *handle)
3515 struct hclge_vport *vport = hclge_get_vport(handle);
3516 struct hclge_dev *hdev = vport->back;
3518 hclge_update_link_status(hdev);
3520 return hdev->hw.mac.link;
3523 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3525 if (!pci_num_vf(hdev->pdev)) {
3526 dev_err(&hdev->pdev->dev,
3527 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3531 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3532 dev_err(&hdev->pdev->dev,
3533 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3534 vf, pci_num_vf(hdev->pdev));
	/* VF vport index starts from 1; vport 0 is the PF itself */
	vf += HCLGE_VF_VPORT_START_NUM;

	return &hdev->vport[vf];
}
3543 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3544 struct ifla_vf_info *ivf)
3546 struct hclge_vport *vport = hclge_get_vport(handle);
3547 struct hclge_dev *hdev = vport->back;
	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;
3554 ivf->linkstate = vport->vf_info.link_state;
3555 ivf->spoofchk = vport->vf_info.spoofchk;
3556 ivf->trusted = vport->vf_info.trusted;
3557 ivf->min_tx_rate = 0;
3558 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3559 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3560 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3561 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3562 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3567 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3570 struct hclge_vport *vport = hclge_get_vport(handle);
3571 struct hclge_dev *hdev = vport->back;
	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;
3579 link_state_old = vport->vf_info.link_state;
3580 vport->vf_info.link_state = link_state;
	/* return success directly if the VF is not alive; the VF will
	 * query the link state itself when it starts work.
	 */
	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
		return 0;
3588 ret = hclge_push_vf_link_status(vport);
3590 vport->vf_info.link_state = link_state_old;
3591 dev_err(&hdev->pdev->dev,
3592 "failed to push vf%d link status, ret = %d\n", vf, ret);
3598 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3600 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3602 /* fetch the events from their corresponding regs */
3603 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3604 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3605 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3606 HCLGE_RAS_PF_OTHER_INT_STS_REG);
	/* Assumption: If by any chance reset and mailbox events are reported
	 * together then we will only process the reset event and will defer
	 * the processing of the mailbox events. Since we would not have
	 * cleared the RX CMDQ event this time, we would receive another
	 * interrupt from H/W just for the mailbox.
	 *
	 * check for vector0 reset event sources
	 */
3616 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3617 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3618 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3619 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3620 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3621 hdev->rst_stats.imp_rst_cnt++;
3622 return HCLGE_VECTOR0_EVENT_RST;
3625 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3626 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3627 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3628 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3629 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3630 hdev->rst_stats.global_rst_cnt++;
3631 return HCLGE_VECTOR0_EVENT_RST;
3634 /* check for vector0 msix event and hardware error event source */
3635 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3636 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3637 return HCLGE_VECTOR0_EVENT_ERR;
3639 /* check for vector0 ptp event source */
3640 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3641 *clearval = msix_src_reg;
3642 return HCLGE_VECTOR0_EVENT_PTP;
3645 /* check for vector0 mailbox(=CMDQ RX) event source */
3646 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3647 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3648 *clearval = cmdq_src_reg;
3649 return HCLGE_VECTOR0_EVENT_MBX;
3652 /* print other vector0 event source */
3653 dev_info(&hdev->pdev->dev,
3654 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3655 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3657 return HCLGE_VECTOR0_EVENT_OTHER;
3660 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3663 switch (event_type) {
3664 case HCLGE_VECTOR0_EVENT_PTP:
3665 case HCLGE_VECTOR0_EVENT_RST:
3666 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3668 case HCLGE_VECTOR0_EVENT_MBX:
3669 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3676 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3678 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3679 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3680 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3681 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3682 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3685 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3687 writel(enable ? 1 : 0, vector->addr);
3690 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3692 struct hclge_dev *hdev = data;
3693 unsigned long flags;
3697 hclge_enable_vector(&hdev->misc_vector, false);
3698 event_cause = hclge_check_event_cause(hdev, &clearval);
3700 /* vector 0 interrupt is shared with reset and mailbox source events. */
3701 switch (event_cause) {
3702 case HCLGE_VECTOR0_EVENT_ERR:
3703 hclge_errhand_task_schedule(hdev);
3705 case HCLGE_VECTOR0_EVENT_RST:
3706 hclge_reset_task_schedule(hdev);
3708 case HCLGE_VECTOR0_EVENT_PTP:
3709 spin_lock_irqsave(&hdev->ptp->lock, flags);
3710 hclge_ptp_clean_tx_hwts(hdev);
3711 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3713 case HCLGE_VECTOR0_EVENT_MBX:
		/* If we are here then,
		 * 1. Either we are not handling any mbx task and we are not
		 *    scheduled as well
		 * 2. We could be handling a mbx task but nothing more is
		 *    scheduled.
		 * In both cases, we should schedule the mbx task as there are
		 * more mbx messages reported by this interrupt.
		 */
3723 hclge_mbx_task_schedule(hdev);
3726 dev_warn(&hdev->pdev->dev,
3727 "received unknown or unhandled event of vector0\n");
3731 hclge_clear_event_cause(hdev, event_cause, clearval);
3733 /* Enable interrupt if it is not caused by reset event or error event */
3734 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3735 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3736 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3737 hclge_enable_vector(&hdev->misc_vector, true);
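	/* reset and error events deliberately leave the vector masked here;
	 * their service tasks re-enable it once handling has completed.
	 */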
3742 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3744 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3745 dev_warn(&hdev->pdev->dev,
3746 "vector(vector_id %d) has been freed.\n", vector_id);
3750 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3751 hdev->num_msi_left += 1;
3752 hdev->num_msi_used -= 1;
3755 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3757 struct hclge_misc_vector *vector = &hdev->misc_vector;
3759 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3761 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3762 hdev->vector_status[0] = 0;
3764 hdev->num_msi_left -= 1;
3765 hdev->num_msi_used += 1;
3768 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3772 hclge_get_misc_vector(hdev);
3774 /* this would be explicitly freed in the end */
3775 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3776 HCLGE_NAME, pci_name(hdev->pdev));
3777 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3778 0, hdev->misc_vector.name, hdev);
3780 hclge_free_vector(hdev, 0);
3781 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3782 hdev->misc_vector.vector_irq);
3788 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3790 free_irq(hdev->misc_vector.vector_irq, hdev);
3791 hclge_free_vector(hdev, 0);
3794 int hclge_notify_client(struct hclge_dev *hdev,
3795 enum hnae3_reset_notify_type type)
3797 struct hnae3_handle *handle = &hdev->vport[0].nic;
3798 struct hnae3_client *client = hdev->nic_client;
3801 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3804 if (!client->ops->reset_notify)
3807 ret = client->ops->reset_notify(handle, type);
3809 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3815 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3816 enum hnae3_reset_notify_type type)
3818 struct hnae3_handle *handle = &hdev->vport[0].roce;
3819 struct hnae3_client *client = hdev->roce_client;
3822 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3825 if (!client->ops->reset_notify)
3828 ret = client->ops->reset_notify(handle, type);
3830 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3836 static int hclge_reset_wait(struct hclge_dev *hdev)
3838 #define HCLGE_RESET_WATI_MS 100
3839 #define HCLGE_RESET_WAIT_CNT 350
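/* Worst case the driver polls for HCLGE_RESET_WAIT_CNT * HCLGE_RESET_WATI_MS
 * = 350 * 100 ms = 35 seconds before declaring the reset timed out.
 */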
3841 u32 val, reg, reg_bit;
3844 switch (hdev->reset_type) {
3845 case HNAE3_IMP_RESET:
3846 reg = HCLGE_GLOBAL_RESET_REG;
3847 reg_bit = HCLGE_IMP_RESET_BIT;
3849 case HNAE3_GLOBAL_RESET:
3850 reg = HCLGE_GLOBAL_RESET_REG;
3851 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3853 case HNAE3_FUNC_RESET:
3854 reg = HCLGE_FUN_RST_ING;
3855 reg_bit = HCLGE_FUN_RST_ING_B;
	default:
		dev_err(&hdev->pdev->dev,
			"Wait for unsupported reset type: %d\n",
			hdev->reset_type);
		return -EINVAL;
	}
3864 val = hclge_read_dev(&hdev->hw, reg);
3865 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3866 msleep(HCLGE_RESET_WATI_MS);
3867 val = hclge_read_dev(&hdev->hw, reg);
	if (cnt >= HCLGE_RESET_WAIT_CNT) {
		dev_warn(&hdev->pdev->dev,
			 "Wait for reset timeout: %d\n", hdev->reset_type);
		return -EBUSY;
	}

	return 0;
}
3880 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3882 struct hclge_vf_rst_cmd *req;
3883 struct hclge_desc desc;
3885 req = (struct hclge_vf_rst_cmd *)desc.data;
3886 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
	req->dest_vfid = func_id;
	if (reset)
		req->vf_rst = 1;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
3895 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3899 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3900 struct hclge_vport *vport = &hdev->vport[i];
3903 /* Send cmd to set/clear VF's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"set vf(%u) rst failed %d!\n",
				vport->vport_id - HCLGE_VF_VPORT_START_NUM,
				ret);
			return ret;
		}

		if (!reset ||
		    !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
			continue;
3917 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
3918 hdev->reset_type == HNAE3_FUNC_RESET) {
3919 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
3920 &vport->need_notify);
		/* Inform VF to process the reset.
		 * hclge_inform_reset_assert_to_vf may fail if VF
		 * driver is not loaded.
		 */
		ret = hclge_inform_reset_assert_to_vf(vport);
3930 dev_warn(&hdev->pdev->dev,
3931 "inform reset to vf(%u) failed %d!\n",
3932 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3939 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3941 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3942 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
3943 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3946 if (time_is_before_jiffies(hdev->last_mbx_scheduled +
3947 HCLGE_MBX_SCHED_TIMEOUT))
3948 dev_warn(&hdev->pdev->dev,
3949 "mbx service task is scheduled after %ums on cpu%u!\n",
3950 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
3951 smp_processor_id());
3953 hclge_mbx_handler(hdev);
3955 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3958 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3960 struct hclge_pf_rst_sync_cmd *req;
3961 struct hclge_desc desc;
3965 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3966 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
	/* the VF needs to bring its netdev down via mailbox during PF or
	 * FLR reset
	 */
	hclge_mailbox_service_task(hdev);
3972 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		/* for compatibility with old firmware, wait
		 * 100 ms for the VF to stop IO
		 */
		if (ret == -EOPNOTSUPP) {
			msleep(HCLGE_RESET_SYNC_TIME);
			return;
		} else if (ret) {
			dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
				 ret);
			return;
		} else if (req->all_vf_ready) {
			return;
		}

		msleep(HCLGE_PF_RESET_SYNC_TIME);
3987 hclge_comm_cmd_reuse_desc(&desc, true);
3988 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
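	/* with HCLGE_PF_RESET_SYNC_TIME = 20 ms per retry and
	 * HCLGE_PF_RESET_SYNC_CNT = 1500 retries, the PF waits up to about
	 * 30 seconds for all VFs to report that they have stopped IO.
	 */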
3990 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3993 void hclge_report_hw_error(struct hclge_dev *hdev,
3994 enum hnae3_hw_error_type type)
3996 struct hnae3_client *client = hdev->nic_client;
3998 if (!client || !client->ops->process_hw_error ||
3999 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
4002 client->ops->process_hw_error(&hdev->vport[0].nic, type);
4005 static void hclge_handle_imp_error(struct hclge_dev *hdev)
4009 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4010 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
4011 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
4012 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
4013 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
4016 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
4017 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
4018 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
4019 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
4023 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
4025 struct hclge_desc desc;
4026 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
4029 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
4030 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
4031 req->fun_reset_vfid = func_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"send function reset cmd fail, status = %d\n", ret);

	return ret;
}
4041 static void hclge_do_reset(struct hclge_dev *hdev)
4043 struct hnae3_handle *handle = &hdev->vport[0].nic;
4044 struct pci_dev *pdev = hdev->pdev;
4047 if (hclge_get_hw_reset_stat(handle)) {
4048 dev_info(&pdev->dev, "hardware reset not finish\n");
4049 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
4050 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
4051 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
4055 switch (hdev->reset_type) {
4056 case HNAE3_IMP_RESET:
4057 dev_info(&pdev->dev, "IMP reset requested\n");
4058 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4059 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
4060 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
4062 case HNAE3_GLOBAL_RESET:
4063 dev_info(&pdev->dev, "global reset requested\n");
4064 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
4065 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
4066 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
4068 case HNAE3_FUNC_RESET:
4069 dev_info(&pdev->dev, "PF reset requested\n");
4070 /* schedule again to check later */
4071 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
4072 hclge_reset_task_schedule(hdev);
4075 dev_warn(&pdev->dev,
4076 "unsupported reset type: %d\n", hdev->reset_type);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
	struct hclge_dev *hdev = ae_dev->priv;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_IMP_RESET, addr)) {
		rst_level = HNAE3_IMP_RESET;
		clear_bit(HNAE3_IMP_RESET, addr);
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
		rst_level = HNAE3_GLOBAL_RESET;
		clear_bit(HNAE3_GLOBAL_RESET, addr);
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FUNC_RESET, addr)) {
		rst_level = HNAE3_FUNC_RESET;
		clear_bit(HNAE3_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	/* a lower-level request never overrides an ongoing higher-level reset */
	if (hdev->reset_type != HNAE3_NONE_RESET &&
	    rst_level < hdev->reset_type)
		return HNAE3_NONE_RESET;

	return rst_level;
}

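/* Clear the interrupt status of the reset that was just handled and
 * re-enable the misc vector so further reset interrupts can fire.
 */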
static void hclge_clear_reset_cause(struct hclge_dev *hdev)
{
	u32 clearval = 0;

	switch (hdev->reset_type) {
	case HNAE3_IMP_RESET:
		clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
		break;
	case HNAE3_GLOBAL_RESET:
		clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
		break;
	default:
		break;
	}

	if (!clearval)
		return;

	/* For revision 0x20, the reset interrupt source
	 * can only be cleared after hardware reset done
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
				clearval);

	hclge_enable_vector(&hdev->misc_vector, true);
}

static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
	u32 reg_val;

	reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
	if (enable)
		reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
	else
		reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;

	hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}

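/* Tell all VFs a function reset is coming, then poll until they report
 * ready (see hclge_func_reset_sync_vf() above).
 */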
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_set_all_vf_rst(hdev, true);
	if (ret)
		return ret;

	hclge_func_reset_sync_vf(hdev);

	return 0;
}

static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
{
	u32 reg_val;
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;

		ret = hclge_func_reset_cmd(hdev, 0);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"asserting function reset fail %d!\n", ret);
			return ret;
		}

		/* After performing PF reset, it is not necessary to do the
		 * mailbox handling or send any command to firmware, because
		 * any mailbox handling or command to firmware is only valid
		 * after hclge_comm_cmd_init is called.
		 */
		set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
		hdev->rst_stats.pf_rst_cnt++;
		break;
	case HNAE3_FLR_RESET:
		ret = hclge_func_reset_notify_vf(hdev);
		if (ret)
			return ret;
		break;
	case HNAE3_IMP_RESET:
		hclge_handle_imp_error(hdev);
		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
		hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
				BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
		break;
	default:
		break;
	}

	/* inform hardware that preparatory work is done */
	msleep(HCLGE_RESET_SYNC_TIME);
	hclge_reset_handshake(hdev, true);
	dev_info(&hdev->pdev->dev, "prepare wait ok\n");

	return ret;
}

static void hclge_show_rst_info(struct hclge_dev *hdev)
{
	char *buf;

	buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
	if (!buf)
		return;

	hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);

	dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);

	kfree(buf);
}

static bool hclge_reset_err_handle(struct hclge_dev *hdev)
{
#define MAX_RESET_FAIL_CNT 5

	if (hdev->reset_pending) {
		dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
			 hdev->reset_pending);
		return true;
	} else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
		   HCLGE_RESET_INT_M) {
		dev_info(&hdev->pdev->dev,
			 "reset failed because new reset interrupt\n");
		hclge_clear_reset_cause(hdev);
		return false;
	} else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
		hdev->rst_stats.reset_fail_cnt++;
		set_bit(hdev->reset_type, &hdev->reset_pending);
		dev_info(&hdev->pdev->dev,
			 "re-schedule reset task(%u)\n",
			 hdev->rst_stats.reset_fail_cnt);
		return true;
	}

	hclge_clear_reset_cause(hdev);

	/* recover the handshake status when reset fail */
	hclge_reset_handshake(hdev, true);

	dev_err(&hdev->pdev->dev, "Reset fail!\n");

	hclge_show_rst_info(hdev);

	set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	return false;
}

static void hclge_update_reset_level(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_level;

	/* reset request will not be set during reset, so clear
	 * pending reset request to avoid unnecessary reset
	 * caused by the same reason.
	 */
	hclge_get_reset_level(ae_dev, &hdev->reset_request);

	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to fix.
	 */
	reset_level = hclge_get_reset_level(ae_dev,
					    &hdev->default_reset_request);
	if (reset_level != HNAE3_NONE_RESET)
		set_bit(reset_level, &hdev->reset_request);
}

static int hclge_set_rst_done(struct hclge_dev *hdev)
{
	struct hclge_pf_rst_done_cmd *req;
	struct hclge_desc desc;
	int ret;

	req = (struct hclge_pf_rst_done_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
	req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* To be compatible with the old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "current firmware does not support command(0x%x)!\n",
			 HCLGE_OPC_PF_RST_DONE);
		return 0;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
			ret);
	}

	return ret;
}

static int hclge_reset_prepare_up(struct hclge_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_FUNC_RESET:
	case HNAE3_FLR_RESET:
		ret = hclge_set_all_vf_rst(hdev, false);
		break;
	case HNAE3_GLOBAL_RESET:
	case HNAE3_IMP_RESET:
		ret = hclge_set_rst_done(hdev);
		break;
	default:
		break;
	}

	/* clear up the handshake status after re-initialize done */
	hclge_reset_handshake(hdev, false);

	return ret;
}

static int hclge_reset_stack(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_reset_ae_dev(hdev->ae_dev);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
}

static int hclge_reset_prepare(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.reset_cnt++;
	/* perform reset of the stack & ae device for a client */
	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	return hclge_reset_prepare_wait(hdev);
}

static int hclge_reset_rebuild(struct hclge_dev *hdev)
{
	int ret;

	hdev->rst_stats.hw_reset_done_cnt++;

	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_reset_stack(hdev);
	rtnl_unlock();
	if (ret)
		return ret;

	hclge_clear_reset_cause(hdev);

	ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		return ret;

	ret = hclge_reset_prepare_up(hdev);
	if (ret)
		return ret;

	rtnl_lock();
	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	rtnl_unlock();
	if (ret)
		return ret;

	ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		return ret;

	hdev->last_reset_time = jiffies;
	hdev->rst_stats.reset_fail_cnt = 0;
	hdev->rst_stats.reset_done_cnt++;
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);

	hclge_update_reset_level(hdev);

	return 0;
}

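/* Reset state machine: prepare (quiesce clients, assert the reset and
 * handshake with firmware) -> wait for the hardware reset to complete ->
 * rebuild (re-init the stack, clear the cause, bring clients back up).
 * Any failure falls through to the error handler, which may re-schedule
 * the reset task.
 */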
static void hclge_reset(struct hclge_dev *hdev)
{
	if (hclge_reset_prepare(hdev))
		goto err_reset;

	if (hclge_reset_wait(hdev))
		goto err_reset;

	if (hclge_reset_rebuild(hdev))
		goto err_reset;

	return;

err_reset:
	if (hclge_reset_err_handle(hdev))
		hclge_reset_task_schedule(hdev);
}

static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclge_dev *hdev = ae_dev->priv;

	/* We might end up getting called broadly because of the two cases
	 * below:
	 * 1. Recoverable error was conveyed through APEI and the only way to
	 *    bring normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * Check if this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog hit
	 * us again. We will know this if the last reset request did not occur
	 * very recently (watchdog timer = 5*HZ, so check after a sufficiently
	 * large time, say 4*5*HZ). In case of a new request we reset the
	 * "reset level" to PF reset. And if it is a repeat of the most recent
	 * reset request, we want to throttle it, so we will not allow it
	 * again before 3*HZ.
	 */
	if (time_before(jiffies, (hdev->last_reset_time +
				  HCLGE_RESET_INTERVAL))) {
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
		return;
	}

	if (hdev->default_reset_request) {
		hdev->reset_level =
			hclge_get_reset_level(ae_dev,
					      &hdev->default_reset_request);
	} else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
		hdev->reset_level = HNAE3_FUNC_RESET;
	}

	dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
		 hdev->reset_level);

	/* request reset & schedule reset task */
	set_bit(hdev->reset_level, &hdev->reset_request);
	hclge_reset_task_schedule(hdev);

	if (hdev->reset_level < HNAE3_GLOBAL_RESET)
		hdev->reset_level++;
}

static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
	struct hclge_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclge_reset_timer(struct timer_list *t)
{
	struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);

	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
	if (!hdev->default_reset_request)
		return;

	dev_info(&hdev->pdev->dev,
		 "triggering reset in reset timer\n");
	hclge_reset_event(hdev->pdev, NULL);
}

static void hclge_reset_subtask(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is then, we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset then, we can proceed with driver, client
	 *       reset.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
	hdev->last_reset_time = jiffies;
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_reset(hdev);

	/* check if we got any *new* reset requests to be honored */
	hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
	if (hdev->reset_type != HNAE3_NONE_RESET)
		hclge_do_reset(hdev);

	hdev->reset_type = HNAE3_NONE_RESET;
}

static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	enum hnae3_reset_type reset_type;

	if (ae_dev->hw_err_reset_req) {
		reset_type = hclge_get_reset_level(ae_dev,
						   &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_type);
	}

	if (hdev->default_reset_request && ae_dev->ops->reset_event)
		ae_dev->ops->reset_event(hdev->pdev, NULL);

	/* enable interrupt after error handling complete */
	hclge_enable_vector(&hdev->misc_vector, true);
}

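/* Two recovery paths follow: hclge_handle_err_recovery() is used when the
 * device supports RAS reporting through the IMP, while
 * hclge_misc_err_recovery() parses the misc vector interrupt status
 * directly on older devices. Both end by requesting any needed reset.
 */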
static void hclge_handle_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->hw_err_reset_req = 0;

	if (hclge_find_error_source(hdev)) {
		hclge_handle_error_info_log(ae_dev);
		hclge_handle_mac_tnl(hdev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_misc_err_recovery(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct device *dev = &hdev->pdev->dev;
	u32 msix_sts_reg;

	msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
	if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error
				(hdev, &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	} else {
		hclge_handle_hw_ras_error(ae_dev);
	}

	hclge_handle_err_reset_request(hdev);
}

static void hclge_errhand_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
		return;

	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_err_recovery(hdev);
	else
		hclge_misc_err_recovery(hdev);
}

static void hclge_reset_service_task(struct hclge_dev *hdev)
{
	if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
		return;

	if (time_is_before_jiffies(hdev->last_rst_scheduled +
				   HCLGE_RESET_SCHED_TIMEOUT))
		dev_warn(&hdev->pdev->dev,
			 "reset service task is scheduled after %ums on cpu%u!\n",
			 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
			 smp_processor_id());

	down(&hdev->reset_sem);
	set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);

	hclge_reset_subtask(hdev);

	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

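/* VF liveness is tracked through mailbox keep-alive messages which
 * refresh last_active_jiffies; a VF that stays silent for longer than
 * the alive window is marked not-alive below.
 */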
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
#define HCLGE_ALIVE_SECONDS_NORMAL 8

	unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
	int i;

	/* start from vport 1 for PF is always alive */
	for (i = 1; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
		    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			continue;
		if (time_after(jiffies, vport->last_active_jiffies +
			       alive_time)) {
			clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
			dev_warn(&hdev->pdev->dev,
				 "VF %u heartbeat timeout\n",
				 i - HCLGE_VF_VPORT_START_NUM);
		}
	}
}

static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
	unsigned long delta = round_jiffies_relative(HZ);

	if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
		return;

	/* Always handle the link updating to make sure link state is
	 * updated when it is triggered by mbx.
	 */
	hclge_update_link_status(hdev);
	hclge_sync_mac_table(hdev);
	hclge_sync_promisc_mode(hdev);
	hclge_sync_fd_table(hdev);

	if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
		delta = jiffies - hdev->last_serv_processed;

		if (delta < round_jiffies_relative(HZ)) {
			delta = round_jiffies_relative(HZ) - delta;
			goto out;
		}
	}

	hdev->serv_processed_cnt++;
	hclge_update_vport_alive(hdev);

	if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
		hdev->last_serv_processed = jiffies;
		goto out;
	}

	if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
		hclge_update_stats_for_all(hdev);

	hclge_update_port_info(hdev);
	hclge_sync_vlan_filter(hdev);

	if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
		hclge_rfs_filter_expire(hdev);

	hdev->last_serv_processed = jiffies;

out:
	hclge_task_schedule(hdev, delta);
}

static void hclge_ptp_service_task(struct hclge_dev *hdev)
{
	unsigned long flags;

	if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
	    !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
	    !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
		return;

	/* to prevent concurrence with the irq handler */
	spin_lock_irqsave(&hdev->ptp->lock, flags);

	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
	 * handler may handle it just before spin_lock_irqsave().
	 */
	if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
		hclge_ptp_clean_tx_hwts(hdev);

	spin_unlock_irqrestore(&hdev->ptp->lock, flags);
}

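/* Main delayed-work entry: error handling and reset run before the
 * periodic work, and are run once more afterwards because
 * hclge_periodic_service_task() may spend time before re-scheduling.
 */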
static void hclge_service_task(struct work_struct *work)
{
	struct hclge_dev *hdev =
		container_of(work, struct hclge_dev, service_task.work);

	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_ptp_service_task(hdev);
	hclge_mailbox_service_task(hdev);
	hclge_periodic_service_task(hdev);

	/* Handle error recovery, reset and mbx again in case periodical task
	 * delays the handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
	hclge_errhand_service_task(hdev);
	hclge_reset_service_task(hdev);
	hclge_mailbox_service_task(hdev);
}

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
{
	/* VF handle has no client */
	if (!handle->client)
		return container_of(handle, struct hclge_vport, nic);
	else if (handle->client->type == HNAE3_CLIENT_ROCE)
		return container_of(handle, struct hclge_vport, roce);
	else
		return container_of(handle, struct hclge_vport, nic);
}

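/* Vector 0 is the misc vector, so TQP vector idx starts from 1. Vectors
 * beyond the first 64 live in an extended register region, hence the two
 * io_addr formulas below.
 */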
static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
				  struct hnae3_vector_info *vector_info)
{
#define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64

	vector_info->vector = pci_irq_vector(hdev->pdev, idx);

	/* need an extended offset to config vectors >= 64 */
	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_REG_BASE +
				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
	else
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_EXT_REG_BASE +
				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET_H +
				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET;

	hdev->vector_status[idx] = hdev->vport[0].vport_id;
	hdev->vector_irq[idx] = vector_info->vector;
}

static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
			    struct hnae3_vector_info *vector_info)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_vector_info *vector = vector_info;
	struct hclge_dev *hdev = vport->back;
	int alloc = 0;
	u16 i = 0;
	u16 j;

	vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		while (++i < hdev->num_nic_msi) {
			if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
				hclge_get_vector_info(hdev, i, vector);
				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclge_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"Get vector index fail. vector = %d\n", vector);
		return vector_id;
	}

	hclge_free_vector(hdev, vector_id);

	return 0;
}

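/* The RSS ops below are thin wrappers: the hash key, indirection table
 * and tuple configuration live in the shared hclge_comm_rss_cfg and are
 * programmed through the hclge_comm_* helpers common to PF and VF.
 */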
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
			 u8 *key, u8 *hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;

	hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);

	hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
				     ae_dev->dev_specs.rss_ind_tbl_size);

	return 0;
}

static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
	if (ret) {
		dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
		return ret;
	}

	/* Update the shadow RSS table with user specified qids */
	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* Update the hardware */
	return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
					      rss_cfg->rss_indirection_tbl);
}

static int hclge_set_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
				       &hdev->rss_cfg, nfc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set rss tuple, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_rss_tuple(struct hnae3_handle *handle,
			       struct ethtool_rxnfc *nfc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u8 tuple_sets;
	int ret;

	nfc->data = 0;

	ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
				       &tuple_sets);
	if (ret || !tuple_sets)
		return ret;

	nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);

	return 0;
}

static int hclge_get_tc_size(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return hdev->pf_rss_size_max;
}

static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	struct hclge_vport *vport = hdev->vport;
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	struct hnae3_tc_info *tc_info;
	u16 roundup_size;
	u16 rss_size;
	int i;

	tc_info = &vport->nic.kinfo.tc_info;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		rss_size = tc_info->tqp_count[i];
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		/* tc_size set to hardware is the log2 of roundup power of two
		 * of rss_size, the actual queue size is limited by indirection
		 * table.
		 */
		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
		    rss_size == 0) {
			dev_err(&hdev->pdev->dev,
				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
				rss_size);
			return -EINVAL;
		}

		roundup_size = roundup_pow_of_two(rss_size);
		roundup_size = ilog2(roundup_size);

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = tc_info->tqp_offset[i];
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}

int hclge_rss_init_hw(struct hclge_dev *hdev)
{
	u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
	u8 *key = hdev->rss_cfg.rss_hash_key;
	u8 hfunc = hdev->rss_cfg.rss_algo;
	int ret;

	ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
					     rss_indir);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
	if (ret)
		return ret;

	ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
					     &hdev->hw.hw, true,
					     &hdev->rss_cfg);
	if (ret)
		return ret;

	return hclge_init_rss_tc_mode(hdev);
}

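/* Ring-to-vector mappings are written in batches: each descriptor carries
 * up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so long ring chains
 * are flushed and the descriptor re-initialized inside the loop below.
 */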
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ring_chain_node *node;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	enum hclge_comm_cmd_status status;
	enum hclge_opcode_type op;
	u16 tqp_type_and_id;
	int i;

	op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
	hclge_cmd_setup_basic_desc(&desc, op, false);
	req->int_vector_id_l = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_L_M,
					       HCLGE_VECTOR_ID_L_S);
	req->int_vector_id_h = hnae3_get_field(vector_id,
					       HCLGE_VECTOR_ID_H_M,
					       HCLGE_VECTOR_ID_H_S);

	i = 0;
	for (node = ring_chain; node; node = node->next) {
		tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
				HCLGE_INT_TYPE_S,
				hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
		hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
				HCLGE_TQP_ID_S, node->tqp_index);
		hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
				HCLGE_INT_GL_IDX_S,
				hnae3_get_field(node->int_gl_idx,
						HNAE3_RING_GL_IDX_M,
						HNAE3_RING_GL_IDX_S));
		req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
			req->vfid = vport->vport_id;

			status = hclge_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return -EIO;
			}
			i = 0;

			hclge_cmd_setup_basic_desc(&desc,
						   op,
						   false);
			req->int_vector_id_l =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_L_M,
						HCLGE_VECTOR_ID_L_S);
			req->int_vector_id_h =
				hnae3_get_field(vector_id,
						HCLGE_VECTOR_ID_H_M,
						HCLGE_VECTOR_ID_H_S);
		}
	}

	if (i > 0) {
		req->int_cause_num = i;
		req->vfid = vport->vport_id;
		status = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Map TQP fail, status is %d.\n", status);
			return -EIO;
		}
	}

	return 0;
}

static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				    struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&hdev->pdev->dev,
			"failed to get vector index. vector=%d\n", vector);
		return vector_id;
	}

	return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
}

static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int vector_id, ret;

	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclge_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret =%d\n", vector_id);
		return vector_id;
	}

	ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vectorid=%d, ret =%d\n",
			vector_id, ret);

	return ret;
}

static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
				      bool en_uc, bool en_mc, bool en_bc)
{
	struct hclge_vport *vport = &hdev->vport[vf_id];
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_promisc_cfg_cmd *req;
	struct hclge_desc desc;
	bool uc_tx_en = en_uc;
	u8 promisc_cfg = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);

	req = (struct hclge_promisc_cfg_cmd *)desc.data;
	req->vf_id = vf_id;

	if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
		uc_tx_en = false;

	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
	req->extend_promisc = promisc_cfg;

	/* to be compatible with DEVICE_VERSION_V1/2 */
	promisc_cfg = 0;
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
	hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
	req->promisc = promisc_cfg;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport %u promisc mode, ret = %d.\n",
			vf_id, ret);

	return ret;
}

int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
				 bool en_mc_pmc, bool en_bc_pmc)
{
	return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
					  en_uc_pmc, en_mc_pmc, en_bc_pmc);
}

static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
				  bool en_mc_pmc)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool en_bc_pmc = true;

	/* For device whose version is below V2, if broadcast promisc is
	 * enabled, vlan filter is always bypassed. So broadcast promisc should
	 * be disabled until the user enables promisc mode.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;

	return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
					    en_bc_pmc);
}

static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}

static void hclge_sync_fd_state(struct hclge_dev *hdev)
{
	if (hlist_empty(&hdev->fd_rule_list))
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}

static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (!test_bit(location, hdev->fd_bmap)) {
		set_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num++;
	}
}

static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
{
	if (test_bit(location, hdev->fd_bmap)) {
		clear_bit(location, hdev->fd_bmap);
		hdev->hclge_fd_rule_num--;
	}
}

static void hclge_fd_free_node(struct hclge_dev *hdev,
			       struct hclge_fd_rule *rule)
{
	hlist_del(&rule->rule_node);
	kfree(rule);
	hclge_sync_fd_state(hdev);
}

static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
				      struct hclge_fd_rule *old_rule,
				      struct hclge_fd_rule *new_rule,
				      enum HCLGE_FD_NODE_STATE state)
{
	switch (state) {
	case HCLGE_FD_TO_ADD:
	case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * with the same location, no matter its state, because the
		 * new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule
		 * has been configured to the hardware, so just replace
		 * the old rule node with the same location.
		 * 3) since this doesn't add a new node to the list, it's
		 * unnecessary to update the rule number and fd_bmap.
		 */
		new_rule->rule_node.next = old_rule->rule_node.next;
		new_rule->rule_node.pprev = old_rule->rule_node.pprev;
		memcpy(old_rule, new_rule, sizeof(*old_rule));
		kfree(new_rule);
		break;
	case HCLGE_FD_DELETED:
		hclge_fd_dec_rule_cnt(hdev, old_rule->location);
		hclge_fd_free_node(hdev, old_rule);
		break;
	case HCLGE_FD_TO_DEL:
		/* if new request is TO_DEL, and old rule is existent
		 * 1) the state of old rule is TO_DEL, we need do nothing,
		 * because we delete rule by location, other rule content
		 * is unnecessary.
		 * 2) the state of old rule is ACTIVE, we need to change its
		 * state to TO_DEL, so the rule will be deleted when periodic
		 * task being scheduled.
		 * 3) the state of old rule is TO_ADD, it means the rule hasn't
		 * been added to hardware, so we just delete the rule node from
		 * fd_rule_list directly.
		 */
		if (old_rule->state == HCLGE_FD_TO_ADD) {
			hclge_fd_dec_rule_cnt(hdev, old_rule->location);
			hclge_fd_free_node(hdev, old_rule);
			return;
		}
		old_rule->state = HCLGE_FD_TO_DEL;
		break;
	}
}

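/* fd_rule_list is kept sorted by rule location. The lookup below returns
 * the matching rule if present, and records the last node with a smaller
 * location in @parent so a new node can be inserted in order.
 */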
static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
						u16 location,
						struct hclge_fd_rule **parent)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;
		/* record the parent node, use to keep the nodes in fd_rule_list
		 * in ascending order.
		 */
		*parent = rule;
	}

	return NULL;
}

/* insert fd rule node in ascending order according to rule->location */
static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
				      struct hclge_fd_rule *rule,
				      struct hclge_fd_rule *parent)
{
	INIT_HLIST_NODE(&rule->rule_node);

	if (parent)
		hlist_add_behind(&rule->rule_node, &parent->rule_node);
	else
		hlist_add_head(&rule->rule_node, hlist);
}

static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
				     struct hclge_fd_user_def_cfg *cfg)
{
	struct hclge_fd_user_def_cfg_cmd *req;
	struct hclge_desc desc;
	u16 data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);

	req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;

	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
	req->ol2_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
	req->ol3_cfg = cpu_to_le16(data);

	data = 0;
	hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
	hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
			HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
	req->ol4_cfg = cpu_to_le16(data);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set fd user def data, ret= %d\n", ret);
	return ret;
}

static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
{
	int ret;

	if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
		return;

	if (!locked)
		spin_lock_bh(&hdev->fd_rule_lock);

	ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
	if (ret)
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);

	if (!locked)
		spin_unlock_bh(&hdev->fd_rule_lock);
}

static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
					  struct hclge_fd_rule *rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;
	struct hclge_fd_user_def_info *info, *old_info;
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return 0;

	/* valid layers start from 1, so subtract 1 to index the cfg array */
	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	info = &rule->ep.user_def;

	if (!cfg->ref_cnt || cfg->offset == info->offset)
		return 0;

	if (cfg->ref_cnt > 1)
		goto error;

	fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
	if (fd_rule) {
		old_info = &fd_rule->ep.user_def;
		if (info->layer == old_info->layer)
			return 0;
	}

error:
	dev_err(&hdev->pdev->dev,
		"No available offset for layer%d fd rule, each layer only supports one user def offset.\n",
		info->layer + 1);
	return -ENOSPC;
}

static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt) {
		cfg->offset = rule->ep.user_def.offset;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
	cfg->ref_cnt++;
}

static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
					 struct hclge_fd_rule *rule)
{
	struct hclge_fd_user_def_cfg *cfg;

	if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
	    rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
		return;

	cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
	if (!cfg->ref_cnt)
		return;

	cfg->ref_cnt--;
	if (!cfg->ref_cnt) {
		cfg->offset = 0;
		set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	}
}

static void hclge_update_fd_list(struct hclge_dev *hdev,
				 enum HCLGE_FD_NODE_STATE state, u16 location,
				 struct hclge_fd_rule *new_rule)
{
	struct hlist_head *hlist = &hdev->fd_rule_list;
	struct hclge_fd_rule *fd_rule, *parent = NULL;

	fd_rule = hclge_find_fd_rule(hlist, location, &parent);
	if (fd_rule) {
		hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
		if (state == HCLGE_FD_ACTIVE)
			hclge_fd_inc_user_def_refcnt(hdev, new_rule);
		hclge_sync_fd_user_def_cfg(hdev, true);

		hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
		return;
	}

	/* it's unlikely to fail here, because we have checked that the rule
	 * exists before.
	 */
	if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it's inexistent\n",
			 location);
		return;
	}

	hclge_fd_inc_user_def_refcnt(hdev, new_rule);
	hclge_sync_fd_user_def_cfg(hdev, true);

	hclge_fd_insert_rule_node(hlist, new_rule, parent);
	hclge_fd_inc_rule_cnt(hdev, new_rule->location);

	if (state == HCLGE_FD_TO_ADD) {
		set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
		hclge_task_schedule(hdev, 0);
	}
}

static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
{
	struct hclge_get_fd_mode_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);

	req = (struct hclge_get_fd_mode_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
		return ret;
	}

	*fd_mode = req->mode;

	return ret;
}

static int hclge_get_fd_allocation(struct hclge_dev *hdev,
				   u32 *stage1_entry_num,
				   u32 *stage2_entry_num,
				   u16 *stage1_counter_num,
				   u16 *stage2_counter_num)
{
	struct hclge_get_fd_allocation_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);

	req = (struct hclge_get_fd_allocation_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
			ret);
		return ret;
	}

	*stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
	*stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
	*stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
	*stage2_counter_num = le16_to_cpu(req->stage2_counter_num);

	return ret;
}

static int hclge_set_fd_key_config(struct hclge_dev *hdev,
				   enum HCLGE_FD_STAGE stage_num)
{
	struct hclge_set_fd_key_config_cmd *req;
	struct hclge_fd_key_cfg *stage;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);

	req = (struct hclge_set_fd_key_config_cmd *)desc.data;
	stage = &hdev->fd_cfg.key_cfg[stage_num];
	req->stage = stage_num;
	req->key_select = stage->key_sel;
	req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
	req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
	req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
	req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
	req->tuple_mask = cpu_to_le32(~stage->tuple_active);
	req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);

	return ret;
}

static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
{
	struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;

	spin_lock_bh(&hdev->fd_rule_lock);
	memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
	spin_unlock_bh(&hdev->fd_rule_lock);

	hclge_fd_set_user_def_cmd(hdev, cfg);
}

static int hclge_init_fd_config(struct hclge_dev *hdev)
{
#define LOW_2_WORDS 0x03
	struct hclge_fd_key_cfg *key_cfg;
	int ret;

	if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
		return 0;

	ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	if (ret)
		return ret;

	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}

	key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
	key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
	key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
	key_cfg->outer_sipv6_word_en = 0;
	key_cfg->outer_dipv6_word_en = 0;

	key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
				BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
				BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
				BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* If using the max 400-bit key, we can support tuples for ether type */
	if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
		key_cfg->tuple_active |=
				BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
			key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
	}

	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
	key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);

	ret = hclge_get_fd_allocation(hdev,
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
				      &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
	if (ret)
		return ret;

	return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
}

static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
				int loc, u8 *key, bool is_add)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
	desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
	req1->index = cpu_to_le32(loc);
	req1->entry_vld = sel_x ? is_add : 0;

	if (key) {
		memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
		memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
		       sizeof(req2->tcam_data));
		memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
		       sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
	}

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"config tcam key fail, ret=%d\n",
			ret);

	return ret;
}

static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
			      struct hclge_fd_ad_data *action)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_fd_ad_config_cmd *req;
	struct hclge_desc desc;
	u64 ad_data = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);

	req = (struct hclge_fd_ad_config_cmd *)desc.data;
	req->index = cpu_to_le32(loc);
	req->stage = stage;

	hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
		      action->write_rule_id_to_bd);
	hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
			action->rule_id);
	if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
		hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
			      action->override_tc);
		hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
				HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
	}
	ad_data <<= 32;
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
		      action->forward_to_direct_queue);
	hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
			action->queue_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
	hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
			HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
	hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
	hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
			action->counter_id);

	req->ad_data = cpu_to_le64(ad_data);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);

	return ret;
}

static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
				   struct hclge_fd_rule *rule)
{
	int offset, moffset, ip_offset;
	enum HCLGE_FD_KEY_OPT key_opt;
	u16 tmp_x_s, tmp_y_s;
	u32 tmp_x_l, tmp_y_l;
	u8 *p = (u8 *)rule;
	int i;

	if (rule->unused_tuple & BIT(tuple_bit))
		return true;

	key_opt = tuple_key_info[tuple_bit].key_opt;
	offset = tuple_key_info[tuple_bit].offset;
	moffset = tuple_key_info[tuple_bit].moffset;

	switch (key_opt) {
	case KEY_OPT_U8:
		calc_x(*key_x, p[offset], p[moffset]);
		calc_y(*key_y, p[offset], p[moffset]);

		return true;
	case KEY_OPT_LE16:
		calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
		*(__le16 *)key_x = cpu_to_le16(tmp_x_s);
		*(__le16 *)key_y = cpu_to_le16(tmp_y_s);

		return true;
	case KEY_OPT_LE32:
		calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	case KEY_OPT_MAC:
		for (i = 0; i < ETH_ALEN; i++) {
			calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
			calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
			       p[moffset + i]);
		}

		return true;
	case KEY_OPT_IP:
		ip_offset = IPV4_INDEX * sizeof(u32);
		calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
		       *(u32 *)(&p[moffset + ip_offset]));
		*(__le32 *)key_x = cpu_to_le32(tmp_x_l);
		*(__le32 *)key_y = cpu_to_le32(tmp_y_l);

		return true;
	default:
		return false;
	}
}

static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
				 u8 vf_id, u8 network_port_id)
{
	u32 port_number = 0;

	if (port_type == HOST_PORT) {
		hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
				pf_id);
		hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
				vf_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
	} else {
		hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
				HCLGE_NETWORK_PORT_ID_S, network_port_id);
		hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
	}

	return port_number;
}

static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
				       __le32 *key_x, __le32 *key_y,
				       struct hclge_fd_rule *rule)
{
	u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
	u8 cur_pos = 0, tuple_size, shift_bits;
	unsigned int i;

	for (i = 0; i < MAX_META_DATA; i++) {
		tuple_size = meta_data_key_info[i].key_length;
		tuple_bit = key_cfg->meta_data_active & BIT(i);

		switch (tuple_bit) {
		case BIT(ROCE_TYPE):
			hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
			cur_pos += tuple_size;
			break;
		case BIT(DST_VPORT):
			port_number = hclge_get_port_number(HOST_PORT, 0,
							    rule->vf_id, 0);
			hnae3_set_field(meta_data,
					GENMASK(cur_pos + tuple_size, cur_pos),
					cur_pos, port_number);
			cur_pos += tuple_size;
			break;
		default:
			break;
		}
	}

	calc_x(tmp_x, meta_data, 0xFFFFFFFF);
	calc_y(tmp_y, meta_data, 0xFFFFFFFF);
	shift_bits = sizeof(meta_data) * 8 - cur_pos;

	*key_x = cpu_to_le32(tmp_x << shift_bits);
	*key_y = cpu_to_le32(tmp_y << shift_bits);
}

/* A complete key is combined with meta data key and tuple key.
 * Meta data key is stored at the MSB region, and tuple key is stored at
 * the LSB region; unused bits will be filled with 0.
 */
static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
			    struct hclge_fd_rule *rule)
{
	struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
	u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
	u8 *cur_key_x, *cur_key_y;
	u8 meta_data_region;
	u8 tuple_size;
	int ret;
	u32 i;

	memset(key_x, 0, sizeof(key_x));
	memset(key_y, 0, sizeof(key_y));
	cur_key_x = key_x;
	cur_key_y = key_y;

	for (i = 0; i < MAX_TUPLE; i++) {
		bool tuple_valid;

		tuple_size = tuple_key_info[i].key_length / 8;
		if (!(key_cfg->tuple_active & BIT(i)))
			continue;

		tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
						     cur_key_y, rule);
		if (tuple_valid) {
			cur_key_x += tuple_size;
			cur_key_y += tuple_size;
		}
	}

	meta_data_region = hdev->fd_cfg.max_key_length / 8 -
			MAX_META_DATA_LENGTH / 8;

	hclge_fd_convert_meta_data(key_cfg,
				   (__le32 *)(key_x + meta_data_region),
				   (__le32 *)(key_y + meta_data_region),
				   rule);

	ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
				   true);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"fd key_y config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
		return ret;
	}

	ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
				   true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fd key_x config fail, loc=%u, ret=%d\n",
			rule->queue_id, ret);
	return ret;
}

static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
			       struct hclge_fd_rule *rule)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_fd_ad_data ad_data;

	memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
	ad_data.ad_id = rule->location;

	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
		ad_data.drop_packet = true;
	} else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
		ad_data.override_tc = true;
		ad_data.queue_id =
			kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
		ad_data.tc_size =
			ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
	} else {
		ad_data.forward_to_direct_queue = true;
		ad_data.queue_id = rule->queue_id;
	}

	if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
		ad_data.use_counter = true;
		ad_data.counter_id = rule->vf_id %
				     hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
	} else {
		ad_data.use_counter = false;
		ad_data.counter_id = 0;
	}

	ad_data.use_next_stage = false;
	ad_data.next_input_key = 0;

	ad_data.write_rule_id_to_bd = true;
	ad_data.rule_id = rule->location;

	return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
}

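/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow
 * spec and collect the unspecified fields into @unused_tuple, so only
 * the tuples the user actually provided are programmed into the key.
 */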
static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	if (!spec->ip4src)
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (!spec->ip4dst)
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->tos)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (!spec->proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	if (spec->ip_ver != ETH_RX_NFC_IP4)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
				       u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);

	/* check whether src/dst ip address used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->psrc)
		*unused_tuple |= BIT(INNER_SRC_PORT);

	if (!spec->pdst)
		*unused_tuple |= BIT(INNER_DST_PORT);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	return 0;
}

static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
				    u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);

	/* check whether src/dst ip address used */
	if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
		*unused_tuple |= BIT(INNER_SRC_IP);

	if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
		*unused_tuple |= BIT(INNER_DST_IP);

	if (!spec->l4_proto)
		*unused_tuple |= BIT(INNER_IP_PROTO);

	if (!spec->tclass)
		*unused_tuple |= BIT(INNER_IP_TOS);

	if (spec->l4_4_bytes)
		return -EOPNOTSUPP;

	return 0;
}

static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
{
	if (!spec || !unused_tuple)
		return -EINVAL;

	*unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
		BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
		BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);

	if (is_zero_ether_addr(spec->h_source))
		*unused_tuple |= BIT(INNER_SRC_MAC);

	if (is_zero_ether_addr(spec->h_dest))
		*unused_tuple |= BIT(INNER_DST_MAC);

	if (!spec->h_proto)
		*unused_tuple |= BIT(INNER_ETH_TYPE);

	return 0;
}

static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
				    struct ethtool_rx_flow_spec *fs,
				    u32 *unused_tuple)
{
	if (fs->flow_type & FLOW_EXT) {
		if (fs->h_ext.vlan_etype) {
			dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
			return -EOPNOTSUPP;
		}

		if (!fs->h_ext.vlan_tci)
			*unused_tuple |= BIT(INNER_VLAN_TAG_FST);

		if (fs->m_ext.vlan_tci &&
		    be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
			dev_err(&hdev->pdev->dev,
				"failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
				ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
			return -EINVAL;
		}
	} else {
		*unused_tuple |= BIT(INNER_VLAN_TAG_FST);
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"FLOW_MAC_EXT is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		if (is_zero_ether_addr(fs->h_ext.h_dest))
			*unused_tuple |= BIT(INNER_DST_MAC);
		else
			*unused_tuple &= ~BIT(INNER_DST_MAC);
	}

	return 0;
}

static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
				       struct hclge_fd_user_def_info *info)
{
	switch (flow_type) {
	case ETHER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L2;
		*unused_tuple &= ~BIT(INNER_L2_RSV);
		break;
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L3;
		*unused_tuple &= ~BIT(INNER_L3_RSV);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		info->layer = HCLGE_FD_USER_DEF_L4;
		*unused_tuple &= ~BIT(INNER_L4_RSV);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
{
	return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
}

static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
					 struct ethtool_rx_flow_spec *fs,
					 u32 *unused_tuple,
					 struct hclge_fd_user_def_info *info)
{
	u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	u16 data, offset, data_mask, offset_mask;
	int ret;

	info->layer = HCLGE_FD_USER_DEF_NONE;
	*unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;

	if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
		return 0;

	/* user-def data from ethtool is 64 bit value, the bit0~15 is used
	 * for data, and bit32~47 is used for offset.
	 */
	data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
	offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
	offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;

	if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
		dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
		return -EOPNOTSUPP;
	}

	if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
		dev_err(&hdev->pdev->dev,
			"user-def offset[%u] should be no more than %u\n",
			offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
		return -EINVAL;
	}

	if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
		dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
		return -EINVAL;
	}

	ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"unsupported flow type for user-def bytes, ret = %d\n",
			ret);
		return ret;
	}

	info->data = data;
	info->data_mask = data_mask;
	info->offset = offset;

	return 0;
}

static int hclge_fd_check_spec(struct hclge_dev *hdev,
			       struct ethtool_rx_flow_spec *fs,
			       u32 *unused_tuple,
			       struct hclge_fd_user_def_info *info)
{
	u32 flow_type;
	int ret;

	if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
		dev_err(&hdev->pdev->dev,
			"failed to config fd rules, invalid rule location: %u, max is %u.\n",
			fs->location,
			hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
		return -EINVAL;
	}

	ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
	if (ret)
		return ret;

	flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	switch (flow_type) {
	case SCTP_V4_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
						  unused_tuple);
		break;
	case IP_USER_FLOW:
		ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
					       unused_tuple);
		break;
	case SCTP_V6_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
						  unused_tuple);
		break;
	case IPV6_USER_FLOW:
		ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
					       unused_tuple);
		break;
	case ETHER_FLOW:
		if (hdev->fd_cfg.fd_mode !=
		    HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
			dev_err(&hdev->pdev->dev,
				"ETHER_FLOW is not supported in current fd mode!\n");
			return -EOPNOTSUPP;
		}

		ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
						 unused_tuple);
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"unsupported protocol type, protocol type = %#x\n",
			flow_type);
		return -EOPNOTSUPP;
	}

	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check flow union tuple, ret = %d\n",
			ret);
		return ret;
	}

	return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
}

static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
				      struct ethtool_rx_flow_spec *fs,
				      struct hclge_fd_rule *rule, u8 ip_proto)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);

	rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
	rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);

	rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
	rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);

	rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;

	rule->tuples.ip_proto = ip_proto;
	rule->tuples_mask.ip_proto = 0xFF;
}

static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
				   struct ethtool_rx_flow_spec *fs,
				   struct hclge_fd_rule *rule)
{
	rule->tuples.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
	rule->tuples_mask.src_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);

	rule->tuples.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
	rule->tuples_mask.dst_ip[IPV4_INDEX] =
			be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);

	rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
	rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;

	rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
	rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;

	rule->tuples.ether_proto = ETH_P_IP;
	rule->tuples_mask.ether_proto = 0xFFFF;
}

6300 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6301 struct ethtool_rx_flow_spec *fs,
6302 struct hclge_fd_rule *rule, u8 ip_proto)
6304 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6306 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6309 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6311 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6314 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6315 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6317 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6318 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6320 rule->tuples.ether_proto = ETH_P_IPV6;
6321 rule->tuples_mask.ether_proto = 0xFFFF;
6323 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6324 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6326 rule->tuples.ip_proto = ip_proto;
6327 rule->tuples_mask.ip_proto = 0xFF;
6330 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6331 struct ethtool_rx_flow_spec *fs,
6332 struct hclge_fd_rule *rule)
6334 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6336 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6339 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6341 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6344 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6345 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6347 rule->tuples.ip_tos = fs->h_u.usr_ip6_spec.tclass;
6348 rule->tuples_mask.ip_tos = fs->m_u.usr_ip6_spec.tclass;
6350 rule->tuples.ether_proto = ETH_P_IPV6;
6351 rule->tuples_mask.ether_proto = 0xFFFF;
6354 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6355 struct ethtool_rx_flow_spec *fs,
6356 struct hclge_fd_rule *rule)
6358 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6359 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6361 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6362 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6364 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6365 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
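/* Store the parsed user-def data/mask in the tuple matching the
 * configured layer. For L4, the 2-byte value occupies the high 16 bits
 * of the 4-byte tuple. The raw info is kept in rule->ep.user_def so it
 * can be reported back to ethtool later.
 */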
6368 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6369 struct hclge_fd_rule *rule)
6371 switch (info->layer) {
6372 case HCLGE_FD_USER_DEF_L2:
6373 rule->tuples.l2_user_def = info->data;
6374 rule->tuples_mask.l2_user_def = info->data_mask;
6376 case HCLGE_FD_USER_DEF_L3:
6377 rule->tuples.l3_user_def = info->data;
6378 rule->tuples_mask.l3_user_def = info->data_mask;
6380 case HCLGE_FD_USER_DEF_L4:
6381 rule->tuples.l4_user_def = (u32)info->data << 16;
6382 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6388 rule->ep.user_def = *info;
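/* Fill a flow director rule from an ethtool flow spec: dispatch on the
 * basic flow type first, then apply the FLOW_EXT (vlan and user-def)
 * and FLOW_MAC_EXT (dest mac) extensions when present.
 */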
6391 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6392 struct ethtool_rx_flow_spec *fs,
6393 struct hclge_fd_rule *rule,
6394 struct hclge_fd_user_def_info *info)
6396 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6398 switch (flow_type) {
6400 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6403 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6406 hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6409 hclge_fd_get_ip4_tuple(hdev, fs, rule);
6412 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6415 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6418 hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6420 case IPV6_USER_FLOW:
6421 hclge_fd_get_ip6_tuple(hdev, fs, rule);
6424 hclge_fd_get_ether_tuple(hdev, fs, rule);
6430 if (fs->flow_type & FLOW_EXT) {
6431 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6432 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6433 hclge_fd_get_user_def_tuple(info, rule);
6436 if (fs->flow_type & FLOW_MAC_EXT) {
6437 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6438 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6444 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6445 struct hclge_fd_rule *rule)
6449 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6453 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
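/* Common path for installing a flow director rule. It serializes on
 * fd_rule_lock, rejects rules that conflict with the currently active
 * rule type (ethtool vs tc flower), evicts aRFS rules, programs the
 * action and key into the TCAM, and records the rule in the software
 * list on success.
 */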
6456 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6457 struct hclge_fd_rule *rule)
6461 spin_lock_bh(&hdev->fd_rule_lock);
6463 if (hdev->fd_active_type != rule->rule_type &&
6464 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6465 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6466 dev_err(&hdev->pdev->dev,
6467 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6468 rule->rule_type, hdev->fd_active_type);
6469 spin_unlock_bh(&hdev->fd_rule_lock);
6473 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6477 ret = hclge_clear_arfs_rules(hdev);
6481 ret = hclge_fd_config_rule(hdev, rule);
6485 rule->state = HCLGE_FD_ACTIVE;
6486 hdev->fd_active_type = rule->rule_type;
6487 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6490 spin_unlock_bh(&hdev->fd_rule_lock);
6494 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6496 struct hclge_vport *vport = hclge_get_vport(handle);
6497 struct hclge_dev *hdev = vport->back;
6499 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
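/* Decode the ethtool ring_cookie: RX_CLS_FLOW_DISC selects the drop
 * action; otherwise the cookie encodes a vf id (0 means the PF) plus a
 * queue id, which is validated against the target vport's tqp range.
 * e.g. ethtool's "action 3" yields cookie 3, i.e. queue 3 on the PF.
 */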
6502 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6503 u16 *vport_id, u8 *action, u16 *queue_id)
6505 struct hclge_vport *vport = hdev->vport;
6507 if (ring_cookie == RX_CLS_FLOW_DISC) {
6508 *action = HCLGE_FD_ACTION_DROP_PACKET;
6510 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6511 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6514 /* To stay consistent with the user's configuration, subtract 1 when
6515 * printing 'vf', because the vf id from ethtool is offset by 1 for vfs.
6516 */
6517 if (vf > hdev->num_req_vfs) {
6518 dev_err(&hdev->pdev->dev,
6519 "Error: vf id (%u) should be less than %u\n",
6520 vf - 1U, hdev->num_req_vfs);
6524 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6525 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6528 dev_err(&hdev->pdev->dev,
6529 "Error: queue id (%u) > max tqp num (%u)\n",
6534 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6541 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6542 struct ethtool_rxnfc *cmd)
6544 struct hclge_vport *vport = hclge_get_vport(handle);
6545 struct hclge_dev *hdev = vport->back;
6546 struct hclge_fd_user_def_info info;
6547 u16 dst_vport_id = 0, q_index = 0;
6548 struct ethtool_rx_flow_spec *fs;
6549 struct hclge_fd_rule *rule;
6554 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
6555 dev_err(&hdev->pdev->dev,
6556 "flow table director is not supported\n");
6561 dev_err(&hdev->pdev->dev,
6562 "please enable flow director first\n");
6566 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6568 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6572 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6577 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6581 ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6587 rule->flow_type = fs->flow_type;
6588 rule->location = fs->location;
6589 rule->unused_tuple = unused;
6590 rule->vf_id = dst_vport_id;
6591 rule->queue_id = q_index;
6592 rule->action = action;
6593 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6595 ret = hclge_add_fd_entry_common(hdev, rule);
6602 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6603 struct ethtool_rxnfc *cmd)
6605 struct hclge_vport *vport = hclge_get_vport(handle);
6606 struct hclge_dev *hdev = vport->back;
6607 struct ethtool_rx_flow_spec *fs;
6610 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6613 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6615 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6618 spin_lock_bh(&hdev->fd_rule_lock);
6619 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6620 !test_bit(fs->location, hdev->fd_bmap)) {
6621 dev_err(&hdev->pdev->dev,
6622 "Delete fail, rule %u is inexistent\n", fs->location);
6623 spin_unlock_bh(&hdev->fd_rule_lock);
6627 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6632 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6635 spin_unlock_bh(&hdev->fd_rule_lock);
6639 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6640 bool clear_list)
6642 struct hclge_fd_rule *rule;
6643 struct hlist_node *node;
6646 spin_lock_bh(&hdev->fd_rule_lock);
6648 for_each_set_bit(location, hdev->fd_bmap,
6649 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6650 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6654 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6656 hlist_del(&rule->rule_node);
6659 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6660 hdev->hclge_fd_rule_num = 0;
6661 bitmap_zero(hdev->fd_bmap,
6662 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6665 spin_unlock_bh(&hdev->fd_rule_lock);
6668 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6670 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6673 hclge_clear_fd_rules_in_list(hdev, true);
6674 hclge_fd_disable_user_def(hdev);
6677 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6679 struct hclge_vport *vport = hclge_get_vport(handle);
6680 struct hclge_dev *hdev = vport->back;
6681 struct hclge_fd_rule *rule;
6682 struct hlist_node *node;
6684 /* Return ok here, because reset error handling will check this
6685 * return value. If error is returned here, the reset process will
6686 * fail.
6687 */
6688 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6691 /* if fd is disabled, it should not be restored during reset */
6695 spin_lock_bh(&hdev->fd_rule_lock);
6696 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6697 if (rule->state == HCLGE_FD_ACTIVE)
6698 rule->state = HCLGE_FD_TO_ADD;
6700 spin_unlock_bh(&hdev->fd_rule_lock);
6701 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6706 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6707 struct ethtool_rxnfc *cmd)
6709 struct hclge_vport *vport = hclge_get_vport(handle);
6710 struct hclge_dev *hdev = vport->back;
6712 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
6715 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6716 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6721 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6722 struct ethtool_tcpip4_spec *spec,
6723 struct ethtool_tcpip4_spec *spec_mask)
6725 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6726 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6727 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6729 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6730 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6731 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6733 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6734 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6735 0 : cpu_to_be16(rule->tuples_mask.src_port);
6737 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6738 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6739 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6741 spec->tos = rule->tuples.ip_tos;
6742 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6743 0 : rule->tuples_mask.ip_tos;
6746 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6747 struct ethtool_usrip4_spec *spec,
6748 struct ethtool_usrip4_spec *spec_mask)
6750 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6751 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6752 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6754 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6755 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6756 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6758 spec->tos = rule->tuples.ip_tos;
6759 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6760 0 : rule->tuples_mask.ip_tos;
6762 spec->proto = rule->tuples.ip_proto;
6763 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6764 0 : rule->tuples_mask.ip_proto;
6766 spec->ip_ver = ETH_RX_NFC_IP4;
6769 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6770 struct ethtool_tcpip6_spec *spec,
6771 struct ethtool_tcpip6_spec *spec_mask)
6773 cpu_to_be32_array(spec->ip6src,
6774 rule->tuples.src_ip, IPV6_SIZE);
6775 cpu_to_be32_array(spec->ip6dst,
6776 rule->tuples.dst_ip, IPV6_SIZE);
6777 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6778 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6780 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6783 if (rule->unused_tuple & BIT(INNER_DST_IP))
6784 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6786 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6789 spec->tclass = rule->tuples.ip_tos;
6790 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6791 0 : rule->tuples_mask.ip_tos;
6793 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6794 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6795 0 : cpu_to_be16(rule->tuples_mask.src_port);
6797 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6798 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6799 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6802 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6803 struct ethtool_usrip6_spec *spec,
6804 struct ethtool_usrip6_spec *spec_mask)
6806 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6807 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6808 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6809 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6811 cpu_to_be32_array(spec_mask->ip6src,
6812 rule->tuples_mask.src_ip, IPV6_SIZE);
6814 if (rule->unused_tuple & BIT(INNER_DST_IP))
6815 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6817 cpu_to_be32_array(spec_mask->ip6dst,
6818 rule->tuples_mask.dst_ip, IPV6_SIZE);
6820 spec->tclass = rule->tuples.ip_tos;
6821 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6822 0 : rule->tuples_mask.ip_tos;
6824 spec->l4_proto = rule->tuples.ip_proto;
6825 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6826 0 : rule->tuples_mask.ip_proto;
6829 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6830 struct ethhdr *spec,
6831 struct ethhdr *spec_mask)
6833 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6834 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6836 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6837 eth_zero_addr(spec_mask->h_source);
6839 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6841 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6842 eth_zero_addr(spec_mask->h_dest);
6844 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6846 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6847 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6848 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6851 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6852 struct hclge_fd_rule *rule)
6854 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6855 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6856 fs->h_ext.data[0] = 0;
6857 fs->h_ext.data[1] = 0;
6858 fs->m_ext.data[0] = 0;
6859 fs->m_ext.data[1] = 0;
6861 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6862 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6863 fs->m_ext.data[0] =
6864 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6865 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6869 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6870 struct hclge_fd_rule *rule)
6872 if (fs->flow_type & FLOW_EXT) {
6873 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6874 fs->m_ext.vlan_tci =
6875 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6876 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6878 hclge_fd_get_user_def_info(fs, rule);
6881 if (fs->flow_type & FLOW_MAC_EXT) {
6882 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6883 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6884 eth_zero_addr(fs->m_ext.h_dest);
6886 ether_addr_copy(fs->m_ext.h_dest,
6887 rule->tuples_mask.dst_mac);
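/* Find a rule by location. The rule list is kept sorted by location in
 * ascending order, so the walk can stop as soon as a higher location is
 * seen.
 */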
6891 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6892 u16 location)
6894 struct hclge_fd_rule *rule = NULL;
6895 struct hlist_node *node2;
6897 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6898 if (rule->location == location)
6900 else if (rule->location > location)
6907 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
6908 struct hclge_fd_rule *rule)
6910 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6911 fs->ring_cookie = RX_CLS_FLOW_DISC;
6915 fs->ring_cookie = rule->queue_id;
6916 vf_id = rule->vf_id;
6917 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6918 fs->ring_cookie |= vf_id;
6922 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6923 struct ethtool_rxnfc *cmd)
6925 struct hclge_vport *vport = hclge_get_vport(handle);
6926 struct hclge_fd_rule *rule = NULL;
6927 struct hclge_dev *hdev = vport->back;
6928 struct ethtool_rx_flow_spec *fs;
6930 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6933 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6935 spin_lock_bh(&hdev->fd_rule_lock);
6937 rule = hclge_get_fd_rule(hdev, fs->location);
6939 spin_unlock_bh(&hdev->fd_rule_lock);
6943 fs->flow_type = rule->flow_type;
6944 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6948 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6949 &fs->m_u.tcp_ip4_spec);
6952 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6953 &fs->m_u.usr_ip4_spec);
6958 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6959 &fs->m_u.tcp_ip6_spec);
6961 case IPV6_USER_FLOW:
6962 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6963 &fs->m_u.usr_ip6_spec);
6965 /* The flow type of fd rule has been checked before adding into the
6966 * rule list. As other flow types have been handled, it must be
6967 * ETHER_FLOW for the default case.
6968 */
6970 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6971 &fs->m_u.ether_spec);
6975 hclge_fd_get_ext_info(fs, rule);
6977 hclge_fd_get_ring_cookie(fs, rule);
6979 spin_unlock_bh(&hdev->fd_rule_lock);
6984 static int hclge_get_all_rules(struct hnae3_handle *handle,
6985 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6987 struct hclge_vport *vport = hclge_get_vport(handle);
6988 struct hclge_dev *hdev = vport->back;
6989 struct hclge_fd_rule *rule;
6990 struct hlist_node *node2;
6993 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6996 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6998 spin_lock_bh(&hdev->fd_rule_lock);
6999 hlist_for_each_entry_safe(rule, node2,
7000 &hdev->fd_rule_list, rule_node) {
7001 if (cnt == cmd->rule_cnt) {
7002 spin_unlock_bh(&hdev->fd_rule_lock);
7006 if (rule->state == HCLGE_FD_TO_DEL)
7009 rule_locs[cnt] = rule->location;
7013 spin_unlock_bh(&hdev->fd_rule_lock);
7015 cmd->rule_cnt = cnt;
7020 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
7021 struct hclge_fd_rule_tuples *tuples)
7023 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
7024 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
7026 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
7027 tuples->ip_proto = fkeys->basic.ip_proto;
7028 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
7030 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
7031 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
7032 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
7036 for (i = 0; i < IPV6_SIZE; i++) {
7037 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
7038 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
7043 /* traverse all rules, check whether an existing rule has the same tuples */
7044 static struct hclge_fd_rule *
7045 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
7046 const struct hclge_fd_rule_tuples *tuples)
7048 struct hclge_fd_rule *rule = NULL;
7049 struct hlist_node *node;
7051 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7052 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
7059 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
7060 struct hclge_fd_rule *rule)
7062 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
7063 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
7064 BIT(INNER_SRC_PORT);
7067 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
7068 rule->state = HCLGE_FD_TO_ADD;
7069 if (tuples->ether_proto == ETH_P_IP) {
7070 if (tuples->ip_proto == IPPROTO_TCP)
7071 rule->flow_type = TCP_V4_FLOW;
7073 rule->flow_type = UDP_V4_FLOW;
7075 if (tuples->ip_proto == IPPROTO_TCP)
7076 rule->flow_type = TCP_V6_FLOW;
7078 rule->flow_type = UDP_V6_FLOW;
7080 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
7081 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
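/* aRFS entry point. User-configured rules take precedence: if ethtool
 * or tc flower rules are active, aRFS backs off. Otherwise a new rule
 * is allocated at the first free TCAM location, or an existing rule
 * with the same tuples is retargeted to the new queue; the actual
 * hardware write is deferred to the service task.
 */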
7084 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
7085 u16 flow_id, struct flow_keys *fkeys)
7087 struct hclge_vport *vport = hclge_get_vport(handle);
7088 struct hclge_fd_rule_tuples new_tuples = {};
7089 struct hclge_dev *hdev = vport->back;
7090 struct hclge_fd_rule *rule;
7093 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7096 /* when there is already an fd rule added by the user,
7097 * arfs should not work
7098 */
7099 spin_lock_bh(&hdev->fd_rule_lock);
7100 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7101 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7102 spin_unlock_bh(&hdev->fd_rule_lock);
7106 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7108 /* check whether a flow director filter exists for this flow;
7109 * if not, create a new filter for it;
7110 * if a filter exists with a different queue id, modify the filter;
7111 * if a filter exists with the same queue id, do nothing
7112 */
7113 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7115 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7116 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7117 spin_unlock_bh(&hdev->fd_rule_lock);
7121 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7123 spin_unlock_bh(&hdev->fd_rule_lock);
7127 rule->location = bit_id;
7128 rule->arfs.flow_id = flow_id;
7129 rule->queue_id = queue_id;
7130 hclge_fd_build_arfs_rule(&new_tuples, rule);
7131 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7132 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7133 } else if (rule->queue_id != queue_id) {
7134 rule->queue_id = queue_id;
7135 rule->state = HCLGE_FD_TO_ADD;
7136 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7137 hclge_task_schedule(hdev, 0);
7139 spin_unlock_bh(&hdev->fd_rule_lock);
7140 return rule->location;
7143 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7145 #ifdef CONFIG_RFS_ACCEL
7146 struct hnae3_handle *handle = &hdev->vport[0].nic;
7147 struct hclge_fd_rule *rule;
7148 struct hlist_node *node;
7150 spin_lock_bh(&hdev->fd_rule_lock);
7151 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7152 spin_unlock_bh(&hdev->fd_rule_lock);
7155 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7156 if (rule->state != HCLGE_FD_ACTIVE)
7158 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7159 rule->arfs.flow_id, rule->location)) {
7160 rule->state = HCLGE_FD_TO_DEL;
7161 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7164 spin_unlock_bh(&hdev->fd_rule_lock);
7168 /* must be called with fd_rule_lock held */
7169 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7171 #ifdef CONFIG_RFS_ACCEL
7172 struct hclge_fd_rule *rule;
7173 struct hlist_node *node;
7176 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7179 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7180 switch (rule->state) {
7181 case HCLGE_FD_TO_DEL:
7182 case HCLGE_FD_ACTIVE:
7183 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7184 rule->location, NULL, false);
7188 case HCLGE_FD_TO_ADD:
7189 hclge_fd_dec_rule_cnt(hdev, rule->location);
7190 hlist_del(&rule->rule_node);
7197 hclge_sync_fd_state(hdev);
7203 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7204 struct hclge_fd_rule *rule)
7206 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7207 struct flow_match_basic match;
7208 u16 ethtype_key, ethtype_mask;
7210 flow_rule_match_basic(flow, &match);
7211 ethtype_key = ntohs(match.key->n_proto);
7212 ethtype_mask = ntohs(match.mask->n_proto);
7214 if (ethtype_key == ETH_P_ALL) {
7218 rule->tuples.ether_proto = ethtype_key;
7219 rule->tuples_mask.ether_proto = ethtype_mask;
7220 rule->tuples.ip_proto = match.key->ip_proto;
7221 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7223 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7224 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7228 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7229 struct hclge_fd_rule *rule)
7231 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7232 struct flow_match_eth_addrs match;
7234 flow_rule_match_eth_addrs(flow, &match);
7235 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7236 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7237 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7238 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7240 rule->unused_tuple |= BIT(INNER_DST_MAC);
7241 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7245 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7246 struct hclge_fd_rule *rule)
7248 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7249 struct flow_match_vlan match;
7251 flow_rule_match_vlan(flow, &match);
7252 rule->tuples.vlan_tag1 = match.key->vlan_id |
7253 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7254 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7255 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7257 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7261 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7262 struct hclge_fd_rule *rule)
7266 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7267 struct flow_match_control match;
7269 flow_rule_match_control(flow, &match);
7270 addr_type = match.key->addr_type;
7273 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7274 struct flow_match_ipv4_addrs match;
7276 flow_rule_match_ipv4_addrs(flow, &match);
7277 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7278 rule->tuples_mask.src_ip[IPV4_INDEX] =
7279 be32_to_cpu(match.mask->src);
7280 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7281 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7282 be32_to_cpu(match.mask->dst);
7283 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7284 struct flow_match_ipv6_addrs match;
7286 flow_rule_match_ipv6_addrs(flow, &match);
7287 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7289 be32_to_cpu_array(rule->tuples_mask.src_ip,
7290 match.mask->src.s6_addr32, IPV6_SIZE);
7291 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7293 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7294 match.mask->dst.s6_addr32, IPV6_SIZE);
7296 rule->unused_tuple |= BIT(INNER_SRC_IP);
7297 rule->unused_tuple |= BIT(INNER_DST_IP);
7301 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7302 struct hclge_fd_rule *rule)
7304 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7305 struct flow_match_ports match;
7307 flow_rule_match_ports(flow, &match);
7309 rule->tuples.src_port = be16_to_cpu(match.key->src);
7310 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7311 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7312 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7314 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7315 rule->unused_tuple |= BIT(INNER_DST_PORT);
7319 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7320 struct flow_cls_offload *cls_flower,
7321 struct hclge_fd_rule *rule)
7323 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7324 struct flow_dissector *dissector = flow->match.dissector;
7326 if (dissector->used_keys &
7327 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7328 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7329 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7330 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7331 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7332 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7333 BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7334 dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7335 dissector->used_keys);
7339 hclge_get_cls_key_basic(flow, rule);
7340 hclge_get_cls_key_mac(flow, rule);
7341 hclge_get_cls_key_vlan(flow, rule);
7342 hclge_get_cls_key_ip(flow, rule);
7343 hclge_get_cls_key_port(flow, rule);
7348 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7349 struct flow_cls_offload *cls_flower, int tc)
7351 u32 prio = cls_flower->common.prio;
7353 if (tc < 0 || tc > hdev->tc_max) {
7354 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7358 if (prio == 0 ||
7359 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7360 dev_err(&hdev->pdev->dev,
7361 "prio %u should be in range[1, %u]\n",
7362 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7366 if (test_bit(prio - 1, hdev->fd_bmap)) {
7367 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7373 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7374 struct flow_cls_offload *cls_flower,
7377 struct hclge_vport *vport = hclge_get_vport(handle);
7378 struct hclge_dev *hdev = vport->back;
7379 struct hclge_fd_rule *rule;
7382 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
7383 dev_err(&hdev->pdev->dev,
7384 "cls flower is not supported\n");
7388 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7390 dev_err(&hdev->pdev->dev,
7391 "failed to check cls flower params, ret = %d\n", ret);
7395 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7399 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7405 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7406 rule->cls_flower.tc = tc;
7407 rule->location = cls_flower->common.prio - 1;
7409 rule->cls_flower.cookie = cls_flower->cookie;
7410 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7412 ret = hclge_add_fd_entry_common(hdev, rule);
7419 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7420 unsigned long cookie)
7422 struct hclge_fd_rule *rule;
7423 struct hlist_node *node;
7425 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7426 if (rule->cls_flower.cookie == cookie)
7433 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7434 struct flow_cls_offload *cls_flower)
7436 struct hclge_vport *vport = hclge_get_vport(handle);
7437 struct hclge_dev *hdev = vport->back;
7438 struct hclge_fd_rule *rule;
7441 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7444 spin_lock_bh(&hdev->fd_rule_lock);
7446 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7448 spin_unlock_bh(&hdev->fd_rule_lock);
7452 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7455 spin_unlock_bh(&hdev->fd_rule_lock);
7459 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7460 spin_unlock_bh(&hdev->fd_rule_lock);
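/* Flush pending software rule states to hardware: program TO_ADD rules
 * into the TCAM and delete TO_DEL rules from it. On failure the
 * TBL_CHANGED bit is set again so the service task retries later.
 */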
7465 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7467 struct hclge_fd_rule *rule;
7468 struct hlist_node *node;
7471 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7474 spin_lock_bh(&hdev->fd_rule_lock);
7476 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7477 switch (rule->state) {
7478 case HCLGE_FD_TO_ADD:
7479 ret = hclge_fd_config_rule(hdev, rule);
7482 rule->state = HCLGE_FD_ACTIVE;
7484 case HCLGE_FD_TO_DEL:
7485 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7486 rule->location, NULL, false);
7489 hclge_fd_dec_rule_cnt(hdev, rule->location);
7490 hclge_fd_free_node(hdev, rule);
7499 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7501 spin_unlock_bh(&hdev->fd_rule_lock);
7504 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7506 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7509 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7510 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7512 hclge_clear_fd_rules_in_list(hdev, clear_list);
7515 hclge_sync_fd_user_def_cfg(hdev, false);
7517 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7520 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7522 struct hclge_vport *vport = hclge_get_vport(handle);
7523 struct hclge_dev *hdev = vport->back;
7525 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7526 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7529 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7531 struct hclge_vport *vport = hclge_get_vport(handle);
7532 struct hclge_dev *hdev = vport->back;
7534 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7537 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7539 struct hclge_vport *vport = hclge_get_vport(handle);
7540 struct hclge_dev *hdev = vport->back;
7542 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7545 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7547 struct hclge_vport *vport = hclge_get_vport(handle);
7548 struct hclge_dev *hdev = vport->back;
7550 return hdev->rst_stats.hw_reset_done_cnt;
7553 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7555 struct hclge_vport *vport = hclge_get_vport(handle);
7556 struct hclge_dev *hdev = vport->back;
7558 hdev->fd_en = enable;
7561 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7563 hclge_restore_fd_entries(handle);
7565 hclge_task_schedule(hdev, 0);
7568 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7570 struct hclge_desc desc;
7571 struct hclge_config_mac_mode_cmd *req =
7572 (struct hclge_config_mac_mode_cmd *)desc.data;
7576 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7579 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7580 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7581 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7582 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7583 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7584 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7585 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7586 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7587 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7588 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7591 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7593 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7595 dev_err(&hdev->pdev->dev,
7596 "mac enable fail, ret =%d.\n", ret);
7599 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7600 u8 switch_param, u8 param_mask)
7602 struct hclge_mac_vlan_switch_cmd *req;
7603 struct hclge_desc desc;
7607 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7608 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7610 /* read current config parameter */
7611 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7613 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7614 req->func_id = cpu_to_le32(func_id);
7616 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7618 dev_err(&hdev->pdev->dev,
7619 "read mac vlan switch parameter fail, ret = %d\n", ret);
7623 /* modify and write new config parameter */
7624 hclge_comm_cmd_reuse_desc(&desc, false);
7625 req->switch_param = (req->switch_param & param_mask) | switch_param;
7626 req->param_mask = param_mask;
7628 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7630 dev_err(&hdev->pdev->dev,
7631 "set mac vlan switch parameter fail, ret = %d\n", ret);
7635 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7636 int link_ret)
7638 #define HCLGE_PHY_LINK_STATUS_NUM 200
7640 struct phy_device *phydev = hdev->hw.mac.phydev;
7645 ret = phy_read_status(phydev);
7647 dev_err(&hdev->pdev->dev,
7648 "phy update link status fail, ret = %d\n", ret);
7652 if (phydev->link == link_ret)
7655 msleep(HCLGE_LINK_STATUS_MS);
7656 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7659 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7661 #define HCLGE_MAC_LINK_STATUS_NUM 100
7668 ret = hclge_get_mac_link_status(hdev, &link_status);
7671 if (link_status == link_ret)
7674 msleep(HCLGE_LINK_STATUS_MS);
7675 } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7679 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7684 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7687 hclge_phy_link_status_wait(hdev, link_ret);
7689 return hclge_mac_link_status_wait(hdev, link_ret);
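/* App loopback is configured with a read-modify-write of the MAC mode
 * register so that only the loopback bit changes and the remaining MAC
 * settings are preserved.
 */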
7692 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7694 struct hclge_config_mac_mode_cmd *req;
7695 struct hclge_desc desc;
7699 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7700 /* 1 Read out the MAC mode config at first */
7701 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7702 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7704 dev_err(&hdev->pdev->dev,
7705 "mac loopback get fail, ret =%d.\n", ret);
7709 /* 2 Then setup the loopback flag */
7710 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7711 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7713 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7715 /* 3 Config mac work mode with loopback flag
7716 * and its original configuration parameters
7717 */
7718 hclge_comm_cmd_reuse_desc(&desc, false);
7719 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7721 dev_err(&hdev->pdev->dev,
7722 "mac loopback set fail, ret =%d.\n", ret);
7726 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7727 enum hnae3_loop loop_mode)
7729 struct hclge_common_lb_cmd *req;
7730 struct hclge_desc desc;
7734 req = (struct hclge_common_lb_cmd *)desc.data;
7735 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7737 switch (loop_mode) {
7738 case HNAE3_LOOP_SERIAL_SERDES:
7739 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7741 case HNAE3_LOOP_PARALLEL_SERDES:
7742 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7744 case HNAE3_LOOP_PHY:
7745 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7748 dev_err(&hdev->pdev->dev,
7749 "unsupported loopback mode %d\n", loop_mode);
7753 req->mask = loop_mode_b;
7755 req->enable = loop_mode_b;
7757 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7759 dev_err(&hdev->pdev->dev,
7760 "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7766 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7768 #define HCLGE_COMMON_LB_RETRY_MS 10
7769 #define HCLGE_COMMON_LB_RETRY_NUM 100
7771 struct hclge_common_lb_cmd *req;
7772 struct hclge_desc desc;
7776 req = (struct hclge_common_lb_cmd *)desc.data;
7779 msleep(HCLGE_COMMON_LB_RETRY_MS);
7780 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7782 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7784 dev_err(&hdev->pdev->dev,
7785 "failed to get loopback done status, ret = %d\n",
7789 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7790 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7792 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7793 dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7795 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7796 dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7803 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7804 enum hnae3_loop loop_mode)
7808 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7812 return hclge_cfg_common_loopback_wait(hdev);
7815 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7816 enum hnae3_loop loop_mode)
7820 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7824 hclge_cfg_mac_mode(hdev, en);
7826 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7828 dev_err(&hdev->pdev->dev,
7829 "serdes loopback config mac mode timeout\n");
7834 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7835 struct phy_device *phydev)
7839 if (!phydev->suspended) {
7840 ret = phy_suspend(phydev);
7845 ret = phy_resume(phydev);
7849 return phy_loopback(phydev, true);
7852 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7853 struct phy_device *phydev)
7857 ret = phy_loopback(phydev, false);
7861 return phy_suspend(phydev);
7864 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7866 struct phy_device *phydev = hdev->hw.mac.phydev;
7870 if (hnae3_dev_phy_imp_supported(hdev))
7871 return hclge_set_common_loopback(hdev, en,
7877 ret = hclge_enable_phy_loopback(hdev, phydev);
7879 ret = hclge_disable_phy_loopback(hdev, phydev);
7881 dev_err(&hdev->pdev->dev,
7882 "set phy loopback fail, ret = %d\n", ret);
7886 hclge_cfg_mac_mode(hdev, en);
7888 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7890 dev_err(&hdev->pdev->dev,
7891 "phy loopback config mac mode timeout\n");
7896 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7897 u16 stream_id, bool enable)
7899 struct hclge_desc desc;
7900 struct hclge_cfg_com_tqp_queue_cmd *req =
7901 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7903 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7904 req->tqp_id = cpu_to_le16(tqp_id);
7905 req->stream_id = cpu_to_le16(stream_id);
7907 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7909 return hclge_cmd_send(&hdev->hw, &desc, 1);
7912 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7914 struct hclge_vport *vport = hclge_get_vport(handle);
7915 struct hclge_dev *hdev = vport->back;
7919 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7920 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7927 static int hclge_set_loopback(struct hnae3_handle *handle,
7928 enum hnae3_loop loop_mode, bool en)
7930 struct hclge_vport *vport = hclge_get_vport(handle);
7931 struct hclge_dev *hdev = vport->back;
7934 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7935 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7936 * the same, the packets are looped back in the SSU. If SSU loopback
7937 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7938 */
7939 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7940 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7942 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7943 HCLGE_SWITCH_ALW_LPBK_MASK);
7948 switch (loop_mode) {
7949 case HNAE3_LOOP_APP:
7950 ret = hclge_set_app_loopback(hdev, en);
7952 case HNAE3_LOOP_SERIAL_SERDES:
7953 case HNAE3_LOOP_PARALLEL_SERDES:
7954 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7956 case HNAE3_LOOP_PHY:
7957 ret = hclge_set_phy_loopback(hdev, en);
7959 case HNAE3_LOOP_EXTERNAL:
7963 dev_err(&hdev->pdev->dev,
7964 "loop_mode %d is not supported\n", loop_mode);
7971 ret = hclge_tqp_enable(handle, en);
7973 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7974 en ? "enable" : "disable", ret);
7979 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7983 ret = hclge_set_app_loopback(hdev, false);
7987 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7991 return hclge_cfg_common_loopback(hdev, false,
7992 HNAE3_LOOP_PARALLEL_SERDES);
7995 static void hclge_flush_link_update(struct hclge_dev *hdev)
7997 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
7999 unsigned long last = hdev->serv_processed_cnt;
8002 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
8003 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
8004 last == hdev->serv_processed_cnt)
8008 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
8010 struct hclge_vport *vport = hclge_get_vport(handle);
8011 struct hclge_dev *hdev = vport->back;
8014 hclge_task_schedule(hdev, 0);
8016 /* Set the DOWN flag here to disable link updating */
8017 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8019 /* flush memory to make sure DOWN is seen by service task */
8020 smp_mb__before_atomic();
8021 hclge_flush_link_update(hdev);
8025 static int hclge_ae_start(struct hnae3_handle *handle)
8027 struct hclge_vport *vport = hclge_get_vport(handle);
8028 struct hclge_dev *hdev = vport->back;
8031 hclge_cfg_mac_mode(hdev, true);
8032 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
8033 hdev->hw.mac.link = 0;
8035 /* reset tqp stats */
8036 hclge_comm_reset_tqp_stats(handle);
8038 hclge_mac_start_phy(hdev);
8043 static void hclge_ae_stop(struct hnae3_handle *handle)
8045 struct hclge_vport *vport = hclge_get_vport(handle);
8046 struct hclge_dev *hdev = vport->back;
8048 set_bit(HCLGE_STATE_DOWN, &hdev->state);
8049 spin_lock_bh(&hdev->fd_rule_lock);
8050 hclge_clear_arfs_rules(hdev);
8051 spin_unlock_bh(&hdev->fd_rule_lock);
8053 /* If it is not a PF reset or FLR, the firmware will disable the MAC,
8054 * so we only need to stop the phy here.
8055 */
8056 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
8057 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
8059 if (hdev->reset_type != HNAE3_FUNC_RESET &&
8060 hdev->reset_type != HNAE3_FLR_RESET) {
8061 hclge_mac_stop_phy(hdev);
8062 hclge_update_link_status(hdev);
8067 hclge_reset_tqp(handle);
8069 hclge_config_mac_tnl_int(hdev, false);
8072 hclge_cfg_mac_mode(hdev, false);
8074 hclge_mac_stop_phy(hdev);
8076 /* reset tqp stats */
8077 hclge_comm_reset_tqp_stats(handle);
8078 hclge_update_link_status(hdev);
8081 int hclge_vport_start(struct hclge_vport *vport)
8083 struct hclge_dev *hdev = vport->back;
8085 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8086 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8087 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8088 vport->last_active_jiffies = jiffies;
8089 vport->need_notify = 0;
8091 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8092 if (vport->vport_id) {
8093 hclge_restore_mac_table_common(vport);
8094 hclge_restore_vport_vlan_table(vport);
8096 hclge_restore_hw_table(hdev);
8100 clear_bit(vport->vport_id, hdev->vport_config_block);
8105 void hclge_vport_stop(struct hclge_vport *vport)
8107 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8108 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8109 vport->need_notify = 0;
8112 static int hclge_client_start(struct hnae3_handle *handle)
8114 struct hclge_vport *vport = hclge_get_vport(handle);
8116 return hclge_vport_start(vport);
8119 static void hclge_client_stop(struct hnae3_handle *handle)
8121 struct hclge_vport *vport = hclge_get_vport(handle);
8123 hclge_vport_stop(vport);
8126 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8127 u16 cmdq_resp, u8 resp_code,
8128 enum hclge_mac_vlan_tbl_opcode op)
8130 struct hclge_dev *hdev = vport->back;
8133 dev_err(&hdev->pdev->dev,
8134 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8139 if (op == HCLGE_MAC_VLAN_ADD) {
8140 if (!resp_code || resp_code == 1)
8142 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8143 resp_code == HCLGE_ADD_MC_OVERFLOW)
8146 dev_err(&hdev->pdev->dev,
8147 "add mac addr failed for undefined, code=%u.\n",
8150 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8153 } else if (resp_code == 1) {
8154 dev_dbg(&hdev->pdev->dev,
8155 "remove mac addr failed for miss.\n");
8159 dev_err(&hdev->pdev->dev,
8160 "remove mac addr failed for undefined, code=%u.\n",
8163 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8166 } else if (resp_code == 1) {
8167 dev_dbg(&hdev->pdev->dev,
8168 "lookup mac addr failed for miss.\n");
8172 dev_err(&hdev->pdev->dev,
8173 "lookup mac addr failed for undefined, code=%u.\n",
8178 dev_err(&hdev->pdev->dev,
8179 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8184 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8186 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8188 unsigned int word_num;
8189 unsigned int bit_num;
8191 if (vfid > 255 || vfid < 0)
8194 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8195 word_num = vfid / 32;
8196 bit_num = vfid % 32;
8198 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8200 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8202 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8203 bit_num = vfid % 32;
8205 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8207 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
8213 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8215 #define HCLGE_DESC_NUMBER 3
8216 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8219 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8220 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8221 if (desc[i].data[j])
8227 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8228 const u8 *addr, bool is_mc)
8230 const unsigned char *mac_addr = addr;
8231 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8232 (mac_addr[0]) | (mac_addr[1] << 8);
8233 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8235 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8237 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8238 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8241 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8242 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8245 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8246 struct hclge_mac_vlan_tbl_entry_cmd *req)
8248 struct hclge_dev *hdev = vport->back;
8249 struct hclge_desc desc;
8254 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8256 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8258 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8260 dev_err(&hdev->pdev->dev,
8261 "del mac addr failed for cmd_send, ret =%d.\n",
8265 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8266 retval = le16_to_cpu(desc.retval);
8268 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8269 HCLGE_MAC_VLAN_REMOVE);
8272 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8273 struct hclge_mac_vlan_tbl_entry_cmd *req,
8274 struct hclge_desc *desc,
8277 struct hclge_dev *hdev = vport->back;
8282 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8284 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8285 memcpy(desc[0].data,
8287 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8288 hclge_cmd_setup_basic_desc(&desc[1],
8289 HCLGE_OPC_MAC_VLAN_ADD,
8291 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8292 hclge_cmd_setup_basic_desc(&desc[2],
8293 HCLGE_OPC_MAC_VLAN_ADD,
8295 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8297 memcpy(desc[0].data,
8299 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8300 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8303 dev_err(&hdev->pdev->dev,
8304 "lookup mac addr failed for cmd_send, ret =%d.\n",
8308 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8309 retval = le16_to_cpu(desc[0].retval);
8311 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8312 HCLGE_MAC_VLAN_LKUP);
8315 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8316 struct hclge_mac_vlan_tbl_entry_cmd *req,
8317 struct hclge_desc *mc_desc)
8319 struct hclge_dev *hdev = vport->back;
8326 struct hclge_desc desc;
8328 hclge_cmd_setup_basic_desc(&desc,
8329 HCLGE_OPC_MAC_VLAN_ADD,
8331 memcpy(desc.data, req,
8332 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8333 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8334 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8335 retval = le16_to_cpu(desc.retval);
8337 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8339 HCLGE_MAC_VLAN_ADD);
8341 hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
8342 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8343 hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
8344 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8345 hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
8346 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
8347 memcpy(mc_desc[0].data, req,
8348 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8349 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8350 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8351 retval = le16_to_cpu(mc_desc[0].retval);
8353 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8355 HCLGE_MAC_VLAN_ADD);
8359 dev_err(&hdev->pdev->dev,
8360 "add mac addr failed for cmd_send, ret =%d.\n",
8368 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8369 u16 *allocated_size)
8371 struct hclge_umv_spc_alc_cmd *req;
8372 struct hclge_desc desc;
8375 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8376 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8378 req->space_size = cpu_to_le32(space_size);
8380 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8382 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8387 *allocated_size = le32_to_cpu(desc.data[1]);
8392 static int hclge_init_umv_space(struct hclge_dev *hdev)
8394 u16 allocated_size = 0;
8397 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8401 if (allocated_size < hdev->wanted_umv_size)
8402 dev_warn(&hdev->pdev->dev,
8403 "failed to alloc umv space, want %u, get %u\n",
8404 hdev->wanted_umv_size, allocated_size);
8406 hdev->max_umv_size = allocated_size;
8407 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8408 hdev->share_umv_size = hdev->priv_umv_size +
8409 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8411 if (hdev->ae_dev->dev_specs.mc_mac_size)
8412 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8417 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8419 struct hclge_vport *vport;
8422 for (i = 0; i < hdev->num_alloc_vport; i++) {
8423 vport = &hdev->vport[i];
8424 vport->used_umv_num = 0;
8427 mutex_lock(&hdev->vport_lock);
8428 hdev->share_umv_size = hdev->priv_umv_size +
8429 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8430 mutex_unlock(&hdev->vport_lock);
8432 hdev->used_mc_mac_num = 0;
8435 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8437 struct hclge_dev *hdev = vport->back;
8441 mutex_lock(&hdev->vport_lock);
8443 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8444 hdev->share_umv_size == 0);
8447 mutex_unlock(&hdev->vport_lock);
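/* Account one unicast mac entry against the vport's private umv quota,
 * spilling into (or returning to) the shared pool once the private
 * quota is used up. Callers hold vport_lock.
 */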
8452 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8454 struct hclge_dev *hdev = vport->back;
8457 if (vport->used_umv_num > hdev->priv_umv_size)
8458 hdev->share_umv_size++;
8460 if (vport->used_umv_num > 0)
8461 vport->used_umv_num--;
8463 if (vport->used_umv_num >= hdev->priv_umv_size &&
8464 hdev->share_umv_size > 0)
8465 hdev->share_umv_size--;
8466 vport->used_umv_num++;
8470 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8473 struct hclge_mac_node *mac_node, *tmp;
8475 list_for_each_entry_safe(mac_node, tmp, list, node)
8476 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
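/* State machine for a mac list node. TO_ADD cancels a pending TO_DEL,
 * TO_DEL drops a node that was never written to hardware, and ACTIVE
 * (only used by the sync path) marks a completed TO_ADD.
 */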
8482 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8483 enum HCLGE_MAC_NODE_STATE state)
8486 /* from set_rx_mode or tmp_add_list */
8487 case HCLGE_MAC_TO_ADD:
8488 if (mac_node->state == HCLGE_MAC_TO_DEL)
8489 mac_node->state = HCLGE_MAC_ACTIVE;
8491 /* only from set_rx_mode */
8492 case HCLGE_MAC_TO_DEL:
8493 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8494 list_del(&mac_node->node);
8497 mac_node->state = HCLGE_MAC_TO_DEL;
8500 /* only from tmp_add_list, the mac_node->state won't be
8501 * ACTIVE.
8502 */
8503 case HCLGE_MAC_ACTIVE:
8504 if (mac_node->state == HCLGE_MAC_TO_ADD)
8505 mac_node->state = HCLGE_MAC_ACTIVE;
8511 int hclge_update_mac_list(struct hclge_vport *vport,
8512 enum HCLGE_MAC_NODE_STATE state,
8513 enum HCLGE_MAC_ADDR_TYPE mac_type,
8514 const unsigned char *addr)
8516 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8517 struct hclge_dev *hdev = vport->back;
8518 struct hclge_mac_node *mac_node;
8519 struct list_head *list;
8521 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8522 &vport->uc_mac_list : &vport->mc_mac_list;
8524 spin_lock_bh(&vport->mac_list_lock);
8526 /* if the mac addr is already in the mac list, no need to add a new
8527 * one into it, just check the mac addr state, convert it to a new
8528 * state, or just remove it, or do nothing.
8530 mac_node = hclge_find_mac_node(list, addr);
8532 hclge_update_mac_node(mac_node, state);
8533 spin_unlock_bh(&vport->mac_list_lock);
8534 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8538 /* if this address was never added, there is no need to delete it */
8539 if (state == HCLGE_MAC_TO_DEL) {
8540 spin_unlock_bh(&vport->mac_list_lock);
8541 hnae3_format_mac_addr(format_mac_addr, addr);
8542 dev_err(&hdev->pdev->dev,
8543 "failed to delete address %s from mac list\n",
8548 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8550 spin_unlock_bh(&vport->mac_list_lock);
8554 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8556 mac_node->state = state;
8557 ether_addr_copy(mac_node->mac_addr, addr);
8558 list_add_tail(&mac_node->node, list);
8560 spin_unlock_bh(&vport->mac_list_lock);
static int hclge_add_uc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc;
	u16 egress_port = 0;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
			format_mac_addr, is_zero_ether_addr(addr),
			is_broadcast_ether_addr(addr),
			is_multicast_ether_addr(addr));
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));

	hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
			HCLGE_MAC_EPORT_VFID_S, vport->vport_id);

	req.egress_port = cpu_to_le16(egress_port);

	hclge_prepare_mac_addr(&req, addr, false);

	/* Lookup the mac address in the mac_vlan table, and add
	 * it if the entry does not exist. Repeated unicast entries
	 * are not allowed in the mac vlan table.
	 */
	ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
	if (ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		if (!hclge_is_umv_space_full(vport, false)) {
			ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
			if (!ret)
				hclge_update_umv_space(vport, false);
			mutex_unlock(&hdev->vport_lock);
			return ret;
		}
		mutex_unlock(&hdev->vport_lock);

		if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
			dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
				hdev->priv_umv_size);

		return -ENOSPC;
	}

	/* check if we just hit the duplicate */
	if (!ret)
		return -EEXIST;

	return ret;
}
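/* Return contract of hclge_add_uc_addr_common(), summarized from the flow
 * above: 0 on a fresh add, -EEXIST when the lookup already finds the entry,
 * -ENOSPC when both the private and shared UMV space are exhausted, and any
 * other errno propagated from the command queue.
 */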
static int hclge_rm_uc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
				     addr);
}

int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(addr) ||
	    is_broadcast_ether_addr(addr) ||
	    is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
	hclge_prepare_mac_addr(&req, addr, false);
	ret = hclge_remove_mac_vlan_tbl(vport, &req);
	if (!ret || ret == -ENOENT) {
		mutex_lock(&hdev->vport_lock);
		hclge_update_umv_space(vport, true);
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	return ret;
}
static int hclge_add_mc_addr(struct hnae3_handle *handle,
			     const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	struct hclge_desc desc[3];
	bool is_new_addr = false;
	int status;

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_err(&hdev->pdev->dev,
			"Add mc mac err! invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}
	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (status) {
		if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
		    hdev->used_mc_mac_num >=
		    hdev->ae_dev->dev_specs.mc_mac_size)
			goto err_no_space;

		is_new_addr = true;

		/* This mac addr does not exist, add a new entry for it */
		memset(desc[0].data, 0, sizeof(desc[0].data));
		memset(desc[1].data, 0, sizeof(desc[0].data));
		memset(desc[2].data, 0, sizeof(desc[0].data));
	}
	status = hclge_update_desc_vfid(desc, vport->vport_id, false);
	if (status)
		return status;
	status = hclge_add_mac_vlan_tbl(vport, &req, desc);
	if (status == -ENOSPC)
		goto err_no_space;
	else if (!status && is_new_addr)
		hdev->used_mc_mac_num++;

	return status;

err_no_space:
	/* if already overflowed, do not print each time */
	if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
		vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
		dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
	}

	return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
			    const unsigned char *addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
				     addr);
}

int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr)
{
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	struct hclge_mac_vlan_tbl_entry_cmd req;
	enum hclge_comm_cmd_status status;
	struct hclge_desc desc[3];

	/* mac addr check */
	if (!is_multicast_ether_addr(addr)) {
		hnae3_format_mac_addr(format_mac_addr, addr);
		dev_dbg(&hdev->pdev->dev,
			"Remove mc mac err! invalid mac:%s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	hclge_prepare_mac_addr(&req, addr, true);
	status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
	if (!status) {
		/* This mac addr exists, remove this handle's VFID for it */
		status = hclge_update_desc_vfid(desc, vport->vport_id, true);
		if (status)
			return status;

		if (hclge_is_all_function_id_zero(desc)) {
			/* All the vfids are zero, so delete this entry */
			status = hclge_remove_mac_vlan_tbl(vport, &req);
			if (!status)
				hdev->used_mc_mac_num--;
		} else {
			/* Not all the vfids are zero, just update the vfids */
			status = hclge_add_mac_vlan_tbl(vport, &req, desc);
		}
	} else if (status == -ENOENT) {
		status = 0;
	}

	return status;
}
static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
				      struct list_head *list,
				      enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC)
		sync = hclge_add_uc_addr_common;
	else
		sync = hclge_add_mc_addr_common;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = sync(vport, mac_node->mac_addr);
		if (!ret) {
			mac_node->state = HCLGE_MAC_ACTIVE;
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);

			/* If a unicast mac address already exists in hardware
			 * (-EEXIST), keep going: the remaining unicast
			 * addresses may still be new ones that can be added.
			 * Multicast addresses are reusable: even when there is
			 * no space for a new one (-ENOSPC), the rest may
			 * already exist in hardware and can be reused, so keep
			 * going in that case too.
			 */
			if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
			    (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
				break;
		}
	}
}
static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
					struct list_head *list,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_mac_node *mac_node, *tmp;
	int ret;

	if (mac_type == HCLGE_MAC_ADDR_UC)
		unsync = hclge_rm_uc_addr_common;
	else
		unsync = hclge_rm_mc_addr_common;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		ret = unsync(vport, mac_node->mac_addr);
		if (!ret || ret == -ENOENT) {
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
				&vport->state);
			break;
		}
	}
}
static bool hclge_sync_from_add_list(struct list_head *add_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	bool all_added = true;

	list_for_each_entry_safe(mac_node, tmp, add_list, node) {
		if (mac_node->state == HCLGE_MAC_TO_ADD)
			all_added = false;

		/* if the mac address from tmp_add_list is not in the
		 * uc/mc_mac_list, a TO_DEL request arrived while the address
		 * was being written to the mac table. If the node state is
		 * ACTIVE, change it to TO_DEL so it is removed on the next
		 * pass; otherwise it must be TO_ADD, i.e. the address never
		 * made it into the mac table, so just free the node.
		 */
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			hclge_update_mac_node(new_node, mac_node->state);
			list_del(&mac_node->node);
			kfree(mac_node);
		} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_DEL;
			list_move_tail(&mac_node->node, mac_list);
		} else {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}

	return all_added;
}
static void hclge_sync_from_del_list(struct list_head *del_list,
				     struct list_head *mac_list)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;

	list_for_each_entry_safe(mac_node, tmp, del_list, node) {
		new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
		if (new_node) {
			/* If the mac addr exists in the mac list, a new TO_ADD
			 * request arrived while the address was being
			 * configured. The node state is TO_ADD and the address
			 * is still present in the hardware (the delete
			 * failed), so just mark the node ACTIVE.
			 */
			new_node->state = HCLGE_MAC_ACTIVE;
			list_del(&mac_node->node);
			kfree(mac_node);
		} else {
			list_move_tail(&mac_node->node, mac_list);
		}
	}
}
static void hclge_update_overflow_flags(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type,
					bool is_all_added)
{
	if (mac_type == HCLGE_MAC_ADDR_UC) {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
		else if (hclge_is_umv_space_full(vport, true))
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
	} else {
		if (is_all_added)
			vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
		else
			vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
	}
}
static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
				       enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp, *new_node;
	struct list_head tmp_add_list, tmp_del_list;
	struct list_head *list;
	bool all_added;

	INIT_LIST_HEAD(&tmp_add_list);
	INIT_LIST_HEAD(&tmp_del_list);

	/* move the mac addrs to the tmp_add_list and tmp_del_list, then
	 * we can add/delete these mac addrs outside the spin lock
	 */
	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
			if (!new_node)
				goto stop_traverse;
			ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
			new_node->state = mac_node->state;
			list_add_tail(&new_node->node, &tmp_add_list);
			break;
		default:
			break;
		}
	}

stop_traverse:
	spin_unlock_bh(&vport->mac_list_lock);

	/* delete first, in order to get max mac table space for adding */
	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
	hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);

	/* if some mac addresses failed to be added/deleted, move them back
	 * to the mac_list and retry next time.
	 */
	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);
	all_added = hclge_sync_from_add_list(&tmp_add_list, list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_update_overflow_flags(vport, mac_type, all_added);
}
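/* A rough sketch of the pass above: under mac_list_lock, TO_DEL nodes are
 * moved to tmp_del_list and TO_ADD nodes are copied to tmp_add_list; the
 * firmware operations then run unlocked (deletes first, to free table
 * space for the adds); finally, under the lock again, any nodes that
 * failed are merged back into the list so the next service task retries.
 */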
static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(vport->vport_id, hdev->vport_config_block))
		return false;

	if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
		return true;

	return false;
}

static void hclge_sync_mac_table(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		if (!hclge_need_sync_mac_table(vport))
			continue;

		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
		hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
	}
}
static void hclge_build_del_list(struct list_head *list,
				 bool is_del_list,
				 struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;

	list_for_each_entry_safe(mac_cfg, tmp, list, node) {
		switch (mac_cfg->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_move_tail(&mac_cfg->node, tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			if (is_del_list) {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
			break;
		}
	}
}
static void hclge_unsync_del_list(struct hclge_vport *vport,
				  int (*unsync)(struct hclge_vport *vport,
						const unsigned char *addr),
				  bool is_del_list,
				  struct list_head *tmp_del_list)
{
	struct hclge_mac_node *mac_cfg, *tmp;
	int ret;

	list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
		ret = unsync(vport, mac_cfg->mac_addr);
		if (!ret || ret == -ENOENT) {
			/* clear all mac addrs from hardware but keep them in
			 * the mac list, so they can be restored after the vf
			 * reset finishes.
			 */
			if (!is_del_list &&
			    mac_cfg->state == HCLGE_MAC_ACTIVE) {
				mac_cfg->state = HCLGE_MAC_TO_ADD;
			} else {
				list_del(&mac_cfg->node);
				kfree(mac_cfg);
			}
		} else if (is_del_list) {
			mac_cfg->state = HCLGE_MAC_TO_DEL;
		}
	}
}
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	if (mac_type == HCLGE_MAC_ADDR_UC) {
		list = &vport->uc_mac_list;
		unsync = hclge_rm_uc_addr_common;
	} else {
		list = &vport->mc_mac_list;
		unsync = hclge_rm_mc_addr_common;
	}

	INIT_LIST_HEAD(&tmp_del_list);

	if (!is_del_list)
		set_bit(vport->vport_id, hdev->vport_config_block);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_build_del_list(list, is_del_list, &tmp_del_list);

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);

	spin_lock_bh(&vport->mac_list_lock);

	hclge_sync_from_del_list(&tmp_del_list, list);

	spin_unlock_bh(&vport->mac_list_lock);
}
/* remove all mac addresses when uninitializing */
static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
					enum HCLGE_MAC_ADDR_TYPE mac_type)
{
	struct hclge_mac_node *mac_node, *tmp;
	struct hclge_dev *hdev = vport->back;
	struct list_head tmp_del_list, *list;

	INIT_LIST_HEAD(&tmp_del_list);

	list = (mac_type == HCLGE_MAC_ADDR_UC) ?
		&vport->uc_mac_list : &vport->mc_mac_list;

	spin_lock_bh(&vport->mac_list_lock);

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		switch (mac_node->state) {
		case HCLGE_MAC_TO_DEL:
		case HCLGE_MAC_ACTIVE:
			list_move_tail(&mac_node->node, &tmp_del_list);
			break;
		case HCLGE_MAC_TO_ADD:
			list_del(&mac_node->node);
			kfree(mac_node);
			break;
		}
	}

	spin_unlock_bh(&vport->mac_list_lock);

	hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);

	if (!list_empty(&tmp_del_list))
		dev_warn(&hdev->pdev->dev,
			 "failed to completely uninit %s mac list for vport %u\n",
			 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
			 vport->vport_id);

	list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
		list_del(&mac_node->node);
		kfree(mac_node);
	}
}
static void hclge_uninit_mac_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
		hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
	}
}

static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
					      u16 cmdq_resp, u8 resp_code)
{
#define HCLGE_ETHERTYPE_SUCCESS_ADD		0
#define HCLGE_ETHERTYPE_ALREADY_ADD		1
#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW	2
#define HCLGE_ETHERTYPE_KEY_CONFLICT		3

	int return_status = 0;

	if (cmdq_resp) {
		dev_err(&hdev->pdev->dev,
			"cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
			cmdq_resp);
		return -EIO;
	}

	switch (resp_code) {
	case HCLGE_ETHERTYPE_SUCCESS_ADD:
	case HCLGE_ETHERTYPE_ALREADY_ADD:
		return_status = 0;
		break;
	case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for manager table overflow.\n");
		return_status = -EIO;
		break;
	case HCLGE_ETHERTYPE_KEY_CONFLICT:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for key conflict.\n");
		return_status = -EIO;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for undefined, code=%u.\n",
			resp_code);
		return_status = -EIO;
	}

	return return_status;
}
static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
			    u8 *mac_addr)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	hnae3_format_mac_addr(format_mac_addr, mac_addr);
	if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
		dev_info(&hdev->pdev->dev,
			 "Specified MAC(=%s) is same as before, no change committed!\n",
			 format_mac_addr);
		return 0;
	}

	ether_addr_copy(vport->vf_info.mac, mac_addr);

	/* there is a time window before the PF learns the VF is down; the
	 * mailbox send may fail then, but that is harmless: the VF queries
	 * the MAC again when it reinitializes.
	 */
	if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		dev_info(&hdev->pdev->dev,
			 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
			 vf, format_mac_addr);
		(void)hclge_inform_reset_assert_to_vf(vport);
		return 0;
	}

	dev_info(&hdev->pdev->dev,
		 "MAC of VF %d has been set to %s, will be active after VF reset\n",
		 vf, format_mac_addr);
	return 0;
}
static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
			     const struct hclge_mac_mgr_tbl_entry_cmd *req)
{
	struct hclge_desc desc;
	u8 resp_code;
	u16 retval;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
	memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"add mac ethertype failed for cmd_send, ret =%d.\n",
			ret);
		return ret;
	}

	resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
	retval = le16_to_cpu(desc.retval);

	return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
}

static int init_mgr_tbl(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
		ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"add mac ethertype failed, ret =%d.\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
				       const u8 *old_addr, const u8 *new_addr)
{
	struct list_head *list = &vport->uc_mac_list;
	struct hclge_mac_node *old_node, *new_node;

	new_node = hclge_find_mac_node(list, new_addr);
	if (!new_node) {
		new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
		if (!new_node)
			return -ENOMEM;

		new_node->state = HCLGE_MAC_TO_ADD;
		ether_addr_copy(new_node->mac_addr, new_addr);
		list_add(&new_node->node, list);
	} else {
		if (new_node->state == HCLGE_MAC_TO_DEL)
			new_node->state = HCLGE_MAC_ACTIVE;

		/* keep the new addr at the list head so the device address
		 * is restored first; otherwise, after a global/IMP reset
		 * clears the mac table in hardware, it might not be re-added
		 * at all because of the umv space limitation.
		 */
		list_move(&new_node->node, list);
	}

	if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
		old_node = hclge_find_mac_node(list, old_addr);
		if (old_node) {
			if (old_node->state == HCLGE_MAC_TO_ADD) {
				list_del(&old_node->node);
				kfree(old_node);
			} else {
				old_node->state = HCLGE_MAC_TO_DEL;
			}
		}
	}

	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	return 0;
}
static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
			      bool is_first)
{
	const unsigned char *new_addr = (const unsigned char *)p;
	struct hclge_vport *vport = hclge_get_vport(handle);
	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
	struct hclge_dev *hdev = vport->back;
	unsigned char *old_addr = NULL;
	int ret;

	/* mac addr check */
	if (is_zero_ether_addr(new_addr) ||
	    is_broadcast_ether_addr(new_addr) ||
	    is_multicast_ether_addr(new_addr)) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"change uc mac err! invalid mac: %s.\n",
			format_mac_addr);
		return -EINVAL;
	}

	ret = hclge_pause_addr_cfg(hdev, new_addr);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to configure mac pause address, ret = %d\n",
			ret);
		return ret;
	}

	if (!is_first)
		old_addr = hdev->hw.mac.mac_addr;

	spin_lock_bh(&vport->mac_list_lock);
	ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
	if (ret) {
		hnae3_format_mac_addr(format_mac_addr, new_addr);
		dev_err(&hdev->pdev->dev,
			"failed to change the mac addr:%s, ret = %d\n",
			format_mac_addr, ret);
		spin_unlock_bh(&vport->mac_list_lock);

		if (!is_first)
			hclge_pause_addr_cfg(hdev, old_addr);

		return ret;
	}
	/* we must update the dev addr under the spin lock to prevent it from
	 * being removed by the set_rx_mode path.
	 */
	ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
	spin_unlock_bh(&vport->mac_list_lock);

	hclge_task_schedule(hdev, 0);

	return 0;
}
static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);

	if (!hnae3_dev_phy_imp_supported(hdev))
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hdev->hw.mac.phy_addr;
		/* this command reads phy id and register at the same time */
		fallthrough;
	case SIOCGMIIREG:
		data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
		return 0;

	case SIOCSMIIREG:
		return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
	default:
		return -EOPNOTSUPP;
	}
}

static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
			  int cmd)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return hclge_ptp_get_cfg(hdev, ifr);
	case SIOCSHWTSTAMP:
		return hclge_ptp_set_cfg(hdev, ifr);
	default:
		if (!hdev->hw.mac.phydev)
			return hclge_mii_ioctl(hdev, ifr, cmd);
	}

	return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
					     bool bypass_en)
{
	struct hclge_port_vlan_filter_bypass_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
	req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
	req->vf_id = vf_id;
	hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
		      bypass_en ? 1 : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set vport%u port vlan filter bypass state, ret = %d.\n",
			vf_id, ret);

	return ret;
}
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
				      u8 fe_type, bool filter_en, u8 vf_id)
{
	struct hclge_vlan_filter_ctrl_cmd *req;
	struct hclge_desc desc;
	int ret;

	/* read current vlan filter parameter */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
	req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
	req->vlan_type = vlan_type;
	req->vf_id = vf_id;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
			vf_id, ret);
		return ret;
	}

	/* modify and write new config parameter */
	hclge_comm_cmd_reuse_desc(&desc, false);
	req->vlan_fe = filter_en ?
			(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
			vf_id, ret);

	return ret;
}
static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						  HCLGE_FILTER_FE_EGRESS_V1_B,
						  enable, vport->vport_id);

	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					 HCLGE_FILTER_FE_EGRESS, enable,
					 vport->vport_id);
	if (ret)
		return ret;

	if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
		ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
							!enable);
	} else if (!vport->vport_id) {
		if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
			return 0;

		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
						 HCLGE_FILTER_FE_INGRESS,
						 enable, 0);
	}

	return ret;
}
static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	if (vport->vport_id) {
		if (vport->port_base_vlan_cfg.state !=
			HNAE3_PORT_BASE_VLAN_DISABLE)
			return true;

		if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
			return false;
	} else if (handle->netdev_flags & HNAE3_USER_UPE) {
		return false;
	}

	if (!vport->req_vlan_fltr_en)
		return false;

	/* compatible with former devices, always enable the vlan filter */
	if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		return true;

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
		if (vlan->vlan_id != 0)
			return true;

	return false;
}
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
{
	struct hclge_dev *hdev = vport->back;
	bool need_en;
	int ret;

	mutex_lock(&hdev->vport_lock);

	vport->req_vlan_fltr_en = request_en;

	need_en = hclge_need_enable_vport_vlan_filter(vport);
	if (need_en == vport->cur_vlan_fltr_en) {
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	ret = hclge_set_vport_vlan_filter(vport, need_en);
	if (ret) {
		mutex_unlock(&hdev->vport_lock);
		return ret;
	}

	vport->cur_vlan_fltr_en = need_en;

	mutex_unlock(&hdev->vport_lock);

	return 0;
}

static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_enable_vport_vlan_filter(vport, enable);
}
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
					bool is_kill, u16 vlan,
					struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req0;
	struct hclge_vlan_filter_vf_cfg_cmd *req1;
	u8 vf_byte_val;
	u8 vf_byte_off;
	int ret;

	hclge_cmd_setup_basic_desc(&desc[0],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
	hclge_cmd_setup_basic_desc(&desc[1],
				   HCLGE_OPC_VLAN_FILTER_VF_CFG, false);

	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);

	vf_byte_off = vfid / 8;
	vf_byte_val = 1 << (vfid % 8);

	req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;

	req0->vlan_id = cpu_to_le16(vlan);
	req0->vlan_cfg = is_kill;

	if (vf_byte_off < HCLGE_MAX_VF_BYTES)
		req0->vf_bitmap[vf_byte_off] = vf_byte_val;
	else
		req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;

	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send vf vlan command fail, ret =%d.\n",
			ret);
		return ret;
	}

	return 0;
}
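/* Worked example of the bitmap math above, for an assumed vfid of 83:
 *
 *	vf_byte_off = 83 / 8 = 10
 *	vf_byte_val = 1 << (83 % 8) = 1 << 3 = 0x08
 *
 * Assuming HCLGE_MAX_VF_BYTES is 16, byte 10 still fits in desc[0], so
 * req0->vf_bitmap[10] = 0x08; a vfid of 130 (byte 16) would spill over
 * into req1->vf_bitmap[0] in desc[1].
 */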
static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
					  bool is_kill, struct hclge_desc *desc)
{
	struct hclge_vlan_filter_vf_cfg_cmd *req;

	req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
	if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY	2
		if (!req->resp_code || req->resp_code == 1)
			return 0;

		if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
			set_bit(vfid, hdev->vf_vlan_full);
			dev_warn(&hdev->pdev->dev,
				 "vf vlan table is full, vf vlan filter is disabled\n");
			return 0;
		}

		dev_err(&hdev->pdev->dev,
			"Add vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND	1
		if (!req->resp_code)
			return 0;

		/* the vf vlan filter is disabled when the vf vlan table is
		 * full, so new vlan ids are never added to the table. Just
		 * return 0 without a warning, to avoid a flood of verbose
		 * logs at unload time.
		 */
		if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
			return 0;

		dev_err(&hdev->pdev->dev,
			"Kill vf vlan filter fail, ret =%u.\n",
			req->resp_code);
	}

	return -EIO;
}
static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
				    bool is_kill, u16 vlan)
{
	struct hclge_vport *vport = &hdev->vport[vfid];
	struct hclge_desc desc[2];
	int ret;

	/* if the vf vlan table is full, firmware disables the vf vlan filter,
	 * so it is neither possible nor necessary to add new vlan ids to it.
	 * However, if spoof check is enabled while the table is full, a new
	 * vlan must not be added, because tx packets carrying that vlan id
	 * would be dropped.
	 */
	if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
		if (vport->vf_info.spoofchk && vlan) {
			dev_err(&hdev->pdev->dev,
				"Can't add vlan due to spoof check is on and vf vlan table is full\n");
			return -EPERM;
		}
		return 0;
	}

	ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
	if (ret)
		return ret;

	return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
}
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
				      u16 vlan_id, bool is_kill)
{
	struct hclge_vlan_filter_pf_cfg_cmd *req;
	struct hclge_desc desc;
	u8 vlan_offset_byte_val;
	u8 vlan_offset_byte;
	u8 vlan_offset_160;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);

	vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
	vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
			   HCLGE_VLAN_BYTE_SIZE;
	vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);

	req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
	req->vlan_offset = vlan_offset_160;
	req->vlan_cfg = is_kill;
	req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"port vlan command, send fail, ret =%d.\n", ret);
	return ret;
}
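/* Worked example of the offset math above, assuming the usual constants
 * HCLGE_VLAN_ID_OFFSET_STEP = 160 and HCLGE_VLAN_BYTE_SIZE = 8, for
 * vlan_id 1000:
 *
 *	vlan_offset_160      = 1000 / 160 = 6
 *	vlan_offset_byte     = (1000 % 160) / 8 = 5
 *	vlan_offset_byte_val = 1 << (1000 % 8) = 0x01
 *
 * i.e. bit 0 of byte 5 within 160-vlan window number 6.
 */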
static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
					u16 vlan_id, bool is_kill)
{
	/* vlan 0 may be added twice when the 8021q module is enabled */
	if (!is_kill && !vlan_id &&
	    test_bit(vport_id, hdev->vlan_table[vlan_id]))
		return false;

	if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_warn(&hdev->pdev->dev,
			 "Add port vlan failed, vport %u is already in vlan %u\n",
			 vport_id, vlan_id);
		return false;
	}

	if (is_kill &&
	    !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
		dev_warn(&hdev->pdev->dev,
			 "Delete port vlan failed, vport %u is not in vlan %u\n",
			 vport_id, vlan_id);
		return false;
	}

	return true;
}
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
				    u16 vport_id, u16 vlan_id,
				    bool is_kill)
{
	u16 vport_idx, vport_num = 0;
	int ret;

	if (is_kill && !vlan_id)
		return 0;

	if (vlan_id >= VLAN_N_VID)
		return -EINVAL;

	ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set %u vport vlan filter config fail, ret =%d.\n",
			vport_id, ret);
		return ret;
	}

	if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
		return 0;

	for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
		vport_num++;

	if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
		ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
						 is_kill);

	return ret;
}
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
	struct hclge_vport_vtag_tx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);

	req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
	req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
	req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
		      vcfg->accept_tag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
		      vcfg->accept_untag1 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
		      vcfg->accept_tag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
		      vcfg->accept_untag2 ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
		      vcfg->insert_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
		      vcfg->insert_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
		      vcfg->tag_shift_mode_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
		     HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port txvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
{
	struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
	struct hclge_vport_vtag_rx_cfg_cmd *req;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u16 bmap_index;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);

	req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
		      vcfg->strip_tag1_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
		      vcfg->strip_tag2_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
		      vcfg->vlan1_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
		      vcfg->vlan2_vlan_prionly ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
		      vcfg->strip_tag1_discard_en ? 1 : 0);
	hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
		      vcfg->strip_tag2_discard_en ? 1 : 0);

	req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
	bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
		     HCLGE_VF_NUM_PER_BYTE;
	req->vf_bitmap[bmap_index] =
		1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send port rxvlan cfg command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
				  u16 port_base_vlan_state,
				  u16 vlan_tag, u8 qos)
{
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->txvlan_cfg.accept_tag1 = true;
		vport->txvlan_cfg.insert_tag1_en = false;
		vport->txvlan_cfg.default_tag1 = 0;
	} else {
		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);

		vport->txvlan_cfg.accept_tag1 =
			ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
		vport->txvlan_cfg.insert_tag1_en = true;
		vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
						 vlan_tag;
	}

	vport->txvlan_cfg.accept_untag1 = true;

	/* accept_tag2 and accept_untag2 are not supported on pdev
	 * revision 0x20; newer revisions support them, but neither field
	 * can be configured by the user.
	 */
	vport->txvlan_cfg.accept_tag2 = true;
	vport->txvlan_cfg.accept_untag2 = true;
	vport->txvlan_cfg.insert_tag2_en = false;
	vport->txvlan_cfg.default_tag2 = 0;
	vport->txvlan_cfg.tag_shift_mode_en = true;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en =
				vport->rxvlan_cfg.rx_vlan_offload_en;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;

	ret = hclge_set_vlan_tx_offload_cfg(vport);
	if (ret)
		return ret;

	return hclge_set_vlan_rx_offload_cfg(vport);
}
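/* The default tag built above packs priority and vlan id exactly like an
 * 802.1Q TCI. For example, with qos 5 and vlan_tag 100 (assumed values;
 * VLAN_PRIO_SHIFT is 13):
 *
 *	default_tag1 = (5 << 13) | 100 = 0xa064
 */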
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
	struct hclge_tx_vlan_type_cfg_cmd *tx_req;
	struct hclge_desc desc;
	int status;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
	rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
	rx_req->ot_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
	rx_req->ot_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
	rx_req->in_fst_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
	rx_req->in_sec_vlan_type =
		cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"Send rxvlan protocol type command fail, ret =%d\n",
			status);
		return status;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);

	tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
	tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
	tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Send txvlan protocol type command fail, ret =%d\n",
			status);

	return status;
}
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						  HCLGE_FILTER_FE_EGRESS_V1_B,
						  true, 0);

	/* for revision 0x21, vf vlan filter is per function */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
						 HCLGE_FILTER_FE_EGRESS, true,
						 vport->vport_id);
		if (ret)
			return ret;
		vport->cur_vlan_fltr_en = true;
	}

	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
					  HCLGE_FILTER_FE_INGRESS, true, 0);
}

static int hclge_init_vlan_type(struct hclge_dev *hdev)
{
	hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
	hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;

	return hclge_set_vlan_protocol_type(hdev);
}
static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
{
	struct hclge_port_base_vlan_config *cfg;
	struct hclge_vport *vport;
	int ret;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		cfg = &vport->port_base_vlan_cfg;

		ret = hclge_vlan_offload_cfg(vport, cfg->state,
					     cfg->vlan_info.vlan_tag,
					     cfg->vlan_info.qos);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	int ret;

	ret = hclge_init_vlan_filter(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_type(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vport_vlan_offload(hdev);
	if (ret)
		return ret;

	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				       bool written_to_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			mutex_unlock(&hdev->vport_lock);
			return;
		}
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan) {
		mutex_unlock(&hdev->vport_lock);
		return;
	}

	vlan->hd_tbl_status = written_to_tbl;
	vlan->vlan_id = vlan_id;

	list_add_tail(&vlan->node, &vport->vlan_list);
	mutex_unlock(&hdev->vport_lock);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (!vlan->hd_tbl_status) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret) {
				dev_err(&hdev->pdev->dev,
					"restore vport vlan list failed, ret=%d\n",
					ret);

				mutex_unlock(&hdev->vport_lock);
				return ret;
			}
		}
		vlan->hd_tbl_status = true;
	}

	mutex_unlock(&hdev->vport_lock);

	return 0;
}
static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
				      bool is_write_tbl)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->vlan_id == vlan_id) {
			if (is_write_tbl && vlan->hd_tbl_status)
				hclge_set_vlan_filter_hw(hdev,
							 htons(ETH_P_8021Q),
							 vport->vport_id,
							 vlan_id,
							 true);

			list_del(&vlan->node);
			kfree(vlan);
			break;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;

	mutex_lock(&hdev->vport_lock);

	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
		if (vlan->hd_tbl_status)
			hclge_set_vlan_filter_hw(hdev,
						 htons(ETH_P_8021Q),
						 vport->vport_id,
						 vlan->vlan_id,
						 true);

		vlan->hd_tbl_status = false;
		if (is_del_list) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}

	clear_bit(vport->vport_id, hdev->vf_vlan_full);
	mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_vport *vport;
	int i;

	mutex_lock(&hdev->vport_lock);

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			list_del(&vlan->node);
			kfree(vlan);
		}
	}

	mutex_unlock(&hdev->vport_lock);
}

void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	u16 vlan_proto;
	u16 vlan_id;
	u16 state;
	int vf_id;
	int ret;

	/* PF should restore all vfs' port base vlan */
	for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
		vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
		vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
			    &vport->port_base_vlan_cfg.vlan_info :
			    &vport->port_base_vlan_cfg.old_vlan_info;

		vlan_id = vlan_info->vlan_tag;
		vlan_proto = vlan_info->vlan_proto;
		state = vport->port_base_vlan_cfg.state;

		if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
			clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
			ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
						       vport->vport_id,
						       vlan_id, false);
			vport->port_base_vlan_cfg.tbl_sta = ret == 0;
		}
	}
}
void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
{
	struct hclge_vport_vlan_cfg *vlan, *tmp;
	struct hclge_dev *hdev = vport->back;
	int ret;

	mutex_lock(&hdev->vport_lock);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id,
						       vlan->vlan_id, false);
			if (ret)
				break;
			vlan->hd_tbl_status = true;
		}
	}

	mutex_unlock(&hdev->vport_lock);
}
/* For global reset and imp reset, hardware will clear the mac table,
 * so we change the mac address state from ACTIVE to TO_ADD, then they
 * can be restored in the service task after the reset completes. Further,
 * mac addresses in state TO_DEL or DEL_FAIL do not need to be restored
 * after reset, so those mac nodes are simply removed from mac_list.
 */
static void hclge_mac_node_convert_for_reset(struct list_head *list)
{
	struct hclge_mac_node *mac_node, *tmp;

	list_for_each_entry_safe(mac_node, tmp, list, node) {
		if (mac_node->state == HCLGE_MAC_ACTIVE) {
			mac_node->state = HCLGE_MAC_TO_ADD;
		} else if (mac_node->state == HCLGE_MAC_TO_DEL) {
			list_del(&mac_node->node);
			kfree(mac_node);
		}
	}
}

void hclge_restore_mac_table_common(struct hclge_vport *vport)
{
	spin_lock_bh(&vport->mac_list_lock);

	hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
	hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
	set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);

	spin_unlock_bh(&vport->mac_list_lock);
}
static void hclge_restore_hw_table(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = &hdev->vport[0];
	struct hnae3_handle *handle = &vport->nic;

	hclge_restore_mac_table_common(vport);
	hclge_restore_vport_port_base_vlan_config(hdev);
	hclge_restore_vport_vlan_table(vport);
	set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
	hclge_restore_fd_entries(handle);
}

int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		vport->rxvlan_cfg.strip_tag1_en = false;
		vport->rxvlan_cfg.strip_tag2_en = enable;
		vport->rxvlan_cfg.strip_tag2_discard_en = false;
	} else {
		vport->rxvlan_cfg.strip_tag1_en = enable;
		vport->rxvlan_cfg.strip_tag2_en = true;
		vport->rxvlan_cfg.strip_tag2_discard_en = true;
	}

	vport->rxvlan_cfg.strip_tag1_discard_en = false;
	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
	vport->rxvlan_cfg.rx_vlan_offload_en = enable;

	return hclge_set_vlan_rx_offload_cfg(vport);
}

static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
		set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
}
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
					    u16 port_base_vlan_state,
					    struct hclge_vlan_info *new_info,
					    struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
		hclge_rm_vport_all_vlan_table(vport, false);
		/* force clear VLAN 0 */
		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
		if (ret)
			return ret;
		return hclge_set_vlan_filter_hw(hdev,
						htons(new_info->vlan_proto),
						vport->vport_id,
						new_info->vlan_tag,
						false);
	}

	vport->port_base_vlan_cfg.tbl_sta = false;

	/* force add VLAN 0 */
	ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
	if (ret)
		return ret;

	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
				       vport->vport_id, old_info->vlan_tag,
				       true);
	if (ret)
		return ret;

	return hclge_add_vport_all_vlan_table(vport);
}

static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
					  const struct hclge_vlan_info *old_cfg)
{
	if (new_cfg->vlan_tag != old_cfg->vlan_tag)
		return true;

	if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
		return true;

	return false;
}
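/* The condition above, spelled out: an update is needed when the vlan tag
 * changes, or when the tag is 0 and at least one side has qos 0, i.e. a
 * qos-only change into or out of the "no port base vlan" case.
 */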
static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
					   struct hclge_vlan_info *new_info,
					   struct hclge_vlan_info *old_info)
{
	struct hclge_dev *hdev = vport->back;
	int ret;

	/* add new VLAN tag */
	ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
				       vport->vport_id, new_info->vlan_tag,
				       false);
	if (ret)
		return ret;

	vport->port_base_vlan_cfg.tbl_sta = false;
	/* remove old VLAN tag */
	if (old_info->vlan_tag == 0)
		ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
					       true, 0);
	else
		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       old_info->vlan_tag, true);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clear vport%u port base vlan %u, ret = %d.\n",
			vport->vport_id, old_info->vlan_tag, ret);

	return ret;
}
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_vlan_info *old_vlan_info;
	int ret;

	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;

	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
				     vlan_info->qos);
	if (ret)
		return ret;

	if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
		goto out;

	if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
		ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
						      old_vlan_info);
	else
		ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
						       old_vlan_info);
	if (ret)
		return ret;

out:
	vport->port_base_vlan_cfg.state = state;
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
	else
		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;

	vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
	vport->port_base_vlan_cfg.vlan_info = *vlan_info;
	vport->port_base_vlan_cfg.tbl_sta = true;
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
					  enum hnae3_port_base_vlan_state state,
					  u16 vlan, u8 qos)
{
	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		if (!vlan && !qos)
			return HNAE3_PORT_BASE_VLAN_NOCHANGE;

		return HNAE3_PORT_BASE_VLAN_ENABLE;
	}

	if (!vlan && !qos)
		return HNAE3_PORT_BASE_VLAN_DISABLE;

	if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
	    vport->port_base_vlan_cfg.vlan_info.qos == qos)
		return HNAE3_PORT_BASE_VLAN_NOCHANGE;

	return HNAE3_PORT_BASE_VLAN_MODIFY;
}
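/* Decision summary for the helper above, derived from the code:
 *
 *	DISABLE + vlan/qos both 0   -> NOCHANGE
 *	DISABLE + any nonzero       -> ENABLE
 *	ENABLE  + vlan/qos both 0   -> DISABLE
 *	ENABLE  + same vlan and qos -> NOCHANGE
 *	ENABLE  + different value   -> MODIFY
 */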
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
				    u16 vlan, u8 qos, __be16 proto)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	u16 state;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vfid);
	if (!vport)
		return -EINVAL;

	/* qos is a 3-bit value, so it cannot be bigger than 7 */
	if (vlan > VLAN_N_VID - 1 || qos > 7)
		return -EINVAL;
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	state = hclge_get_port_base_vlan_state(vport,
					       vport->port_base_vlan_cfg.state,
					       vlan, qos);
	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
		return 0;

	vlan_info.vlan_tag = vlan;
	vlan_info.qos = qos;
	vlan_info.vlan_proto = ntohs(proto);

	ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update port base vlan for vf %d, ret = %d\n",
			vfid, ret);
		return ret;
	}

	/* there is a time window before the PF learns the VF is down; the
	 * mailbox send may fail then, but that is harmless: the VF queries
	 * the setting again when it reinitializes.
	 * For DEVICE_VERSION_V3, the VF doesn't need to know about the port
	 * based VLAN state, so no mailbox notification is needed.
	 */
	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
			(void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
								vport->vport_id,
								state,
								&vlan_info);
		else
			set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
				&vport->need_notify);
	}

	return 0;
}
static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
{
	struct hclge_vlan_info *vlan_info;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* clear port base vlan for all vfs */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		vport = &hdev->vport[vf];
		vlan_info = &vport->port_base_vlan_cfg.vlan_info;

		ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
					       vport->vport_id,
					       vlan_info->vlan_tag, true);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"failed to clear vf vlan for vf%d, ret = %d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool written_to_tbl = false;
	int ret = 0;

	/* While the device is resetting or reset has failed, firmware cannot
	 * handle the mailbox. Just record the vlan id and remove it after the
	 * reset finishes.
	 */
	if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	     test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
		return -EBUSY;
	}

	/* when port base vlan is enabled, the port base vlan is used as the
	 * vlan filter entry. In this case the vlan filter table is not
	 * updated when the user adds or removes a vlan; only the vport vlan
	 * list is updated. The vlan ids in the vlan list are written to the
	 * vlan filter table once port base vlan is disabled.
	 */
	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
					       vlan_id, is_kill);
		written_to_tbl = true;
	}

	if (!ret) {
		if (!is_kill)
			hclge_add_vport_vlan_table(vport, vlan_id,
						   written_to_tbl);
		else if (is_kill && vlan_id != 0)
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
	} else if (is_kill) {
		/* when removing the hw vlan filter failed, record the vlan id
		 * and try to remove it from hw later, to stay consistent with
		 * the user configuration.
		 */
		set_bit(vlan_id, vport->vlan_del_fail_bmap);
	}

	hclge_set_vport_vlan_fltr_change(vport);

	return ret;
}
static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];
		if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
					&vport->state))
			continue;

		ret = hclge_enable_vport_vlan_filter(vport,
						     vport->req_vlan_fltr_en);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to sync vlan filter state for vport%u, ret = %d\n",
				vport->vport_id, ret);
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
			return;
		}
	}
}
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT	60

	int i, ret, sync_cnt = 0;
	u16 vlan_id;

	/* the PF (vport 0) is always alive; sync failed vlan deletions for
	 * every vport
	 */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];

		vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
					 VLAN_N_VID);
		while (vlan_id != VLAN_N_VID) {
			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
						       vport->vport_id, vlan_id,
						       true);
			if (ret && ret != -EINVAL)
				return;

			clear_bit(vlan_id, vport->vlan_del_fail_bmap);
			hclge_rm_vport_vlan_table(vport, vlan_id, false);
			hclge_set_vport_vlan_fltr_change(vport);

			sync_cnt++;
			if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
				return;

			vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
						 VLAN_N_VID);
		}
	}

	hclge_sync_vlan_fltr_state(hdev);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
	struct hclge_config_max_frm_size_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);

	req = (struct hclge_config_max_frm_size_cmd *)desc.data;
	req->max_frm_size = cpu_to_le16(new_mps);
	req->min_frm_size = HCLGE_MAC_MIN_FRAME;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return hclge_set_vport_mtu(vport, new_mtu);
}

int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
{
	struct hclge_dev *hdev = vport->back;
	int i, max_frm_size, ret;

	/* HW supports two VLAN layers */
	max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
	if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
	    max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
		return -EINVAL;

	max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
	mutex_lock(&hdev->vport_lock);
	/* VF's mps must fit within hdev->mps */
	if (vport->vport_id && max_frm_size > hdev->mps) {
		mutex_unlock(&hdev->vport_lock);
		return -EINVAL;
	} else if (vport->vport_id) {
		vport->mps = max_frm_size;
		mutex_unlock(&hdev->vport_lock);
		return 0;
	}

	/* PF's mps must be greater than every VF's mps */
	for (i = 1; i < hdev->num_alloc_vport; i++)
		if (max_frm_size < hdev->vport[i].mps) {
			dev_err(&hdev->pdev->dev,
				"failed to set pf mtu for less than vport %d, mps = %u.\n",
				i, hdev->vport[i].mps);
			mutex_unlock(&hdev->vport_lock);
			return -EINVAL;
		}

	hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

	ret = hclge_set_mac_mtu(hdev, max_frm_size);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Change mtu fail, ret =%d\n", ret);
		goto out;
	}

	hdev->mps = max_frm_size;
	vport->mps = max_frm_size;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Allocate buffer fail, ret =%d\n", ret);

out:
	hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	mutex_unlock(&hdev->vport_lock);
	return ret;
}
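/* Example of the frame size math above: for an MTU of 1500,
 *
 *	max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 *		       2 * VLAN_HLEN (2 * 4) = 1526
 *
 * which is then raised to HCLGE_MAC_DEFAULT_FRAME if smaller.
 */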
static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
				    bool enable)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);
	if (enable)
		hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Send tqp reset cmd error, status =%d\n", ret);
		return ret;
	}

	return 0;
}

static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
				  u8 *reset_status)
{
	struct hclge_reset_tqp_queue_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);

	req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(queue_id);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get reset status error, status =%d\n", ret);
		return ret;
	}

	*reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);

	return 0;
}
10761 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10763 struct hclge_comm_tqp *tqp;
10764 struct hnae3_queue *queue;
10766 queue = handle->kinfo.tqp[queue_id];
10767 tqp = container_of(queue, struct hclge_comm_tqp, q);
10772 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10774 struct hclge_vport *vport = hclge_get_vport(handle);
10775 struct hclge_dev *hdev = vport->back;
10776 u16 reset_try_times = 0;
10782 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10783 queue_gid = hclge_covert_handle_qid_global(handle, i);
10784 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10786 dev_err(&hdev->pdev->dev,
10787 "failed to send reset tqp cmd, ret = %d\n",
10792 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10793 ret = hclge_get_reset_status(hdev, queue_gid,
10801 /* Wait for tqp hw reset */
10802 usleep_range(1000, 1200);
10805 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10806 dev_err(&hdev->pdev->dev,
10807 "wait for tqp hw reset timeout\n");
10811 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10813 dev_err(&hdev->pdev->dev,
10814 "failed to deassert soft reset, ret = %d\n",
10818 reset_try_times = 0;
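/* Reset all of the handle's queues with a single RCB reset command
 * covering the contiguous global queue range; if the firmware reports
 * the command as unsupported, fall back to per-queue tqp resets.
 */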
10823 static int hclge_reset_rcb(struct hnae3_handle *handle)
10825 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10826 #define HCLGE_RESET_RCB_SUCCESS 1U
10828 struct hclge_vport *vport = hclge_get_vport(handle);
10829 struct hclge_dev *hdev = vport->back;
10830 struct hclge_reset_cmd *req;
10831 struct hclge_desc desc;
10836 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10838 req = (struct hclge_reset_cmd *)desc.data;
10839 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10840 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10841 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10842 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10844 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10846 dev_err(&hdev->pdev->dev,
10847 "failed to send rcb reset cmd, ret = %d\n", ret);
10851 return_status = req->fun_reset_rcb_return_status;
10852 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10855 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10856 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10861 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10862 * again to reset all tqps
10864 return hclge_reset_tqp_cmd(handle);
10867 int hclge_reset_tqp(struct hnae3_handle *handle)
10869 struct hclge_vport *vport = hclge_get_vport(handle);
10870 struct hclge_dev *hdev = vport->back;
10873 /* only need to disable PF's tqp */
10874 if (!vport->vport_id) {
10875 ret = hclge_tqp_enable(handle, false);
10877 dev_err(&hdev->pdev->dev,
10878 "failed to disable tqp, ret = %d\n", ret);
10883 return hclge_reset_rcb(handle);
10886 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10888 struct hclge_vport *vport = hclge_get_vport(handle);
10889 struct hclge_dev *hdev = vport->back;
10891 return hdev->fw_version;
10894 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10896 struct phy_device *phydev = hdev->hw.mac.phydev;
10901 phy_set_asym_pause(phydev, rx_en, tx_en);
10904 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10908 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10911 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10913 dev_err(&hdev->pdev->dev,
10914 "configure pauseparam error, ret = %d.\n", ret);
10919 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10921 struct phy_device *phydev = hdev->hw.mac.phydev;
10922 u16 remote_advertising = 0;
10923 u16 local_advertising;
10924 u32 rx_pause, tx_pause;
10927 if (!phydev->link || !phydev->autoneg)
10930 local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10933 remote_advertising = LPA_PAUSE_CAP;
10935 if (phydev->asym_pause)
10936 remote_advertising |= LPA_PAUSE_ASYM;
10938 flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10939 remote_advertising);
10940 tx_pause = flowctl & FLOW_CTRL_TX;
10941 rx_pause = flowctl & FLOW_CTRL_RX;
10943 if (phydev->duplex == HCLGE_MAC_HALF) {
10948 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10951 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10952 u32 *rx_en, u32 *tx_en)
10954 struct hclge_vport *vport = hclge_get_vport(handle);
10955 struct hclge_dev *hdev = vport->back;
10956 u8 media_type = hdev->hw.mac.media_type;
10958 *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10959 hclge_get_autoneg(handle) : 0;
10961 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10967 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10970 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10973 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10982 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10983 u32 rx_en, u32 tx_en)
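/* map (rx_en, tx_en) onto the flow control mode:
 * (1, 1) -> FULL, (1, 0) -> RX_PAUSE, (0, 1) -> TX_PAUSE,
 * (0, 0) -> NONE
 */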
10985 if (rx_en && tx_en)
10986 hdev->fc_mode_last_time = HCLGE_FC_FULL;
10987 else if (rx_en && !tx_en)
10988 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10989 else if (!rx_en && tx_en)
10990 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10992 hdev->fc_mode_last_time = HCLGE_FC_NONE;
10994 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10997 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10998 u32 rx_en, u32 tx_en)
11000 struct hclge_vport *vport = hclge_get_vport(handle);
11001 struct hclge_dev *hdev = vport->back;
11002 struct phy_device *phydev = hdev->hw.mac.phydev;
11005 if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
11006 fc_autoneg = hclge_get_autoneg(handle);
11007 if (auto_neg != fc_autoneg) {
11008 dev_info(&hdev->pdev->dev,
11009 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
11010 return -EOPNOTSUPP;
11014 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
11015 dev_info(&hdev->pdev->dev,
11016 "Priority flow control enabled. Cannot set link flow control.\n");
11017 return -EOPNOTSUPP;
11020 hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
11022 hclge_record_user_pauseparam(hdev, rx_en, tx_en);
11024 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
11025 return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
11028 return phy_start_aneg(phydev);
11030 return -EOPNOTSUPP;
11033 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
11034 u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
11036 struct hclge_vport *vport = hclge_get_vport(handle);
11037 struct hclge_dev *hdev = vport->back;
11040 *speed = hdev->hw.mac.speed;
11042 *duplex = hdev->hw.mac.duplex;
11044 *auto_neg = hdev->hw.mac.autoneg;
11046 *lane_num = hdev->hw.mac.lane_num;
11049 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
11052 struct hclge_vport *vport = hclge_get_vport(handle);
11053 struct hclge_dev *hdev = vport->back;
11055 /* When the nic is down, the service task is not running and does not
11056 * update the port information every second. Query the port information
11057 * before returning the media type to ensure it is correct.
11059 hclge_update_port_info(hdev);
11062 *media_type = hdev->hw.mac.media_type;
11065 *module_type = hdev->hw.mac.module_type;
11068 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
11069 u8 *tp_mdix_ctrl, u8 *tp_mdix)
11071 struct hclge_vport *vport = hclge_get_vport(handle);
11072 struct hclge_dev *hdev = vport->back;
11073 struct phy_device *phydev = hdev->hw.mac.phydev;
11074 int mdix_ctrl, mdix, is_resolved;
11075 unsigned int retval;
11078 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11079 *tp_mdix = ETH_TP_MDI_INVALID;
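/* switch the PHY to the MDI/MDI-X page, read the control and status
 * registers, then restore the copper page before decoding the result
 */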
11083 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
11085 retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
11086 mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
11087 HCLGE_PHY_MDIX_CTRL_S);
11089 retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
11090 mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
11091 is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
11093 phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
11095 switch (mdix_ctrl) {
11097 *tp_mdix_ctrl = ETH_TP_MDI;
11100 *tp_mdix_ctrl = ETH_TP_MDI_X;
11103 *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
11106 *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
11111 *tp_mdix = ETH_TP_MDI_INVALID;
11113 *tp_mdix = ETH_TP_MDI_X;
11115 *tp_mdix = ETH_TP_MDI;
11118 static void hclge_info_show(struct hclge_dev *hdev)
11120 struct device *dev = &hdev->pdev->dev;
11122 dev_info(dev, "PF info begin:\n");
11124 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
11125 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
11126 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
11127 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
11128 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
11129 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
11130 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
11131 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
11132 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
11133 dev_info(dev, "This is %s PF\n",
11134 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
11135 dev_info(dev, "DCB %s\n",
11136 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
11137 dev_info(dev, "MQPRIO %s\n",
11138 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
11139 dev_info(dev, "Default tx spare buffer size: %u\n",
11140 hdev->tx_spare_buf_size);
11142 dev_info(dev, "PF info end.\n");
11145 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
11146 struct hclge_vport *vport)
11148 struct hnae3_client *client = vport->nic.client;
11149 struct hclge_dev *hdev = ae_dev->priv;
11150 int rst_cnt = hdev->rst_stats.reset_cnt;
11153 ret = client->ops->init_instance(&vport->nic);
11157 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11158 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11159 rst_cnt != hdev->rst_stats.reset_cnt) {
11164 /* Enable nic hw error interrupts */
11165 ret = hclge_config_nic_hw_error(hdev, true);
11167 dev_err(&ae_dev->pdev->dev,
11168 "fail(%d) to enable hw error interrupts\n", ret);
11172 hnae3_set_client_init_flag(client, ae_dev, 1);
11174 if (netif_msg_drv(&hdev->vport->nic))
11175 hclge_info_show(hdev);
11180 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11181 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11182 msleep(HCLGE_WAIT_RESET_DONE);
11184 client->ops->uninit_instance(&vport->nic, 0);
11189 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
11190 struct hclge_vport *vport)
11192 struct hclge_dev *hdev = ae_dev->priv;
11193 struct hnae3_client *client;
11197 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
11201 client = hdev->roce_client;
11202 ret = hclge_init_roce_base_info(vport);
11206 rst_cnt = hdev->rst_stats.reset_cnt;
11207 ret = client->ops->init_instance(&vport->roce);
11211 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11212 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
11213 rst_cnt != hdev->rst_stats.reset_cnt) {
11215 goto init_roce_err;
11218 /* Enable roce ras interrupts */
11219 ret = hclge_config_rocee_ras_interrupt(hdev, true);
11221 dev_err(&ae_dev->pdev->dev,
11222 "fail(%d) to enable roce ras interrupts\n", ret);
11223 goto init_roce_err;
11226 hnae3_set_client_init_flag(client, ae_dev, 1);
11231 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11232 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11233 msleep(HCLGE_WAIT_RESET_DONE);
11235 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11240 static int hclge_init_client_instance(struct hnae3_client *client,
11241 struct hnae3_ae_dev *ae_dev)
11243 struct hclge_dev *hdev = ae_dev->priv;
11244 struct hclge_vport *vport = &hdev->vport[0];
11247 switch (client->type) {
11248 case HNAE3_CLIENT_KNIC:
11249 hdev->nic_client = client;
11250 vport->nic.client = client;
11251 ret = hclge_init_nic_client_instance(ae_dev, vport);
11255 ret = hclge_init_roce_client_instance(ae_dev, vport);
11260 case HNAE3_CLIENT_ROCE:
11261 if (hnae3_dev_roce_supported(hdev)) {
11262 hdev->roce_client = client;
11263 vport->roce.client = client;
11266 ret = hclge_init_roce_client_instance(ae_dev, vport);
11278 hdev->nic_client = NULL;
11279 vport->nic.client = NULL;
11282 hdev->roce_client = NULL;
11283 vport->roce.client = NULL;
11287 static void hclge_uninit_client_instance(struct hnae3_client *client,
11288 struct hnae3_ae_dev *ae_dev)
11290 struct hclge_dev *hdev = ae_dev->priv;
11291 struct hclge_vport *vport = &hdev->vport[0];
11293 if (hdev->roce_client) {
11294 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11295 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11296 msleep(HCLGE_WAIT_RESET_DONE);
11298 hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11299 hdev->roce_client = NULL;
11300 vport->roce.client = NULL;
11302 if (client->type == HNAE3_CLIENT_ROCE)
11304 if (hdev->nic_client && client->ops->uninit_instance) {
11305 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11306 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11307 msleep(HCLGE_WAIT_RESET_DONE);
11309 client->ops->uninit_instance(&vport->nic, 0);
11310 hdev->nic_client = NULL;
11311 vport->nic.client = NULL;
11315 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11317 struct pci_dev *pdev = hdev->pdev;
11318 struct hclge_hw *hw = &hdev->hw;
11320 /* if the device does not have device memory, return directly */
11321 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11325 devm_ioremap_wc(&pdev->dev,
11326 pci_resource_start(pdev, HCLGE_MEM_BAR),
11327 pci_resource_len(pdev, HCLGE_MEM_BAR));
11328 if (!hw->hw.mem_base) {
11329 dev_err(&pdev->dev, "failed to map device memory\n");
11336 static int hclge_pci_init(struct hclge_dev *hdev)
11338 struct pci_dev *pdev = hdev->pdev;
11339 struct hclge_hw *hw;
11342 ret = pci_enable_device(pdev);
11344 dev_err(&pdev->dev, "failed to enable PCI device\n");
11348 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11350 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11352 dev_err(&pdev->dev,
11353 "can't set consistent PCI DMA");
11354 goto err_disable_device;
11356 dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11359 ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11361 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11362 goto err_disable_device;
11365 pci_set_master(pdev);
11367 hw->hw.io_base = pcim_iomap(pdev, 2, 0);
11368 if (!hw->hw.io_base) {
11369 dev_err(&pdev->dev, "Can't map configuration register space\n");
11371 goto err_release_regions;
11374 ret = hclge_dev_mem_map(hdev);
11376 goto err_unmap_io_base;
11378 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11383 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11384 err_release_regions:
11385 pci_release_regions(pdev);
11386 err_disable_device:
11387 pci_disable_device(pdev);
11392 static void hclge_pci_uninit(struct hclge_dev *hdev)
11394 struct pci_dev *pdev = hdev->pdev;
11396 if (hdev->hw.hw.mem_base)
11397 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
11399 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11400 pci_free_irq_vectors(pdev);
11401 pci_release_mem_regions(pdev);
11402 pci_disable_device(pdev);
11405 static void hclge_state_init(struct hclge_dev *hdev)
11407 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11408 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11409 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11410 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11411 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11412 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11413 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11416 static void hclge_state_uninit(struct hclge_dev *hdev)
11418 set_bit(HCLGE_STATE_DOWN, &hdev->state);
11419 set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11421 if (hdev->reset_timer.function)
11422 del_timer_sync(&hdev->reset_timer);
11423 if (hdev->service_task.work.func)
11424 cancel_delayed_work_sync(&hdev->service_task);
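/* Prepare the device for a reset requested from outside the reset
 * service task (e.g. FLR): retry the prepare step several times while
 * pending resets drain; on success the reset semaphore stays held and
 * the misc vector is masked until hclge_reset_done() runs.
 */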
11427 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11428 enum hnae3_reset_type rst_type)
11430 #define HCLGE_RESET_RETRY_WAIT_MS 500
11431 #define HCLGE_RESET_RETRY_CNT 5
11433 struct hclge_dev *hdev = ae_dev->priv;
11437 while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11438 down(&hdev->reset_sem);
11439 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11440 hdev->reset_type = rst_type;
11441 ret = hclge_reset_prepare(hdev);
11442 if (!ret && !hdev->reset_pending)
11445 dev_err(&hdev->pdev->dev,
11446 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
11447 ret, hdev->reset_pending, retry_cnt);
11448 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11449 up(&hdev->reset_sem);
11450 msleep(HCLGE_RESET_RETRY_WAIT_MS);
11453 /* disable misc vector before reset done */
11454 hclge_enable_vector(&hdev->misc_vector, false);
11455 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
11457 if (hdev->reset_type == HNAE3_FLR_RESET)
11458 hdev->rst_stats.flr_rst_cnt++;
11461 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11463 struct hclge_dev *hdev = ae_dev->priv;
11466 hclge_enable_vector(&hdev->misc_vector, true);
11468 ret = hclge_reset_rebuild(hdev);
11470 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11472 hdev->reset_type = HNAE3_NONE_RESET;
11473 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11474 up(&hdev->reset_sem);
11477 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11481 for (i = 0; i < hdev->num_alloc_vport; i++) {
11482 struct hclge_vport *vport = &hdev->vport[i];
11485 /* Send cmd to clear vport's FUNC_RST_ING */
11486 ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11488 dev_warn(&hdev->pdev->dev,
11489 "clear vport(%u) rst failed %d!\n",
11490 vport->vport_id, ret);
11494 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11496 struct hclge_desc desc;
11499 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11501 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11502 /* This command is only supported by new firmware; it will fail
11503 * with older firmware. Since error value -EOPNOTSUPP can only be
11504 * returned by older firmware running this command, to keep the code
11505 * backward compatible we override this value and return success.
11508 if (ret && ret != -EOPNOTSUPP) {
11509 dev_err(&hdev->pdev->dev,
11510 "failed to clear hw resource, ret = %d\n", ret);
11516 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11518 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11519 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11522 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11524 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11525 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11528 static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
11530 struct hclge_vport *vport = hclge_get_vport(handle);
11532 return &vport->back->hw.mac.wol;
11535 static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
11536 u32 *wol_supported)
11538 struct hclge_query_wol_supported_cmd *wol_supported_cmd;
11539 struct hclge_desc desc;
11542 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
11544 wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;
11546 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11548 dev_err(&hdev->pdev->dev,
11549 "failed to query wol supported, ret = %d\n", ret);
11553 *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);
11558 static int hclge_set_wol_cfg(struct hclge_dev *hdev,
11559 struct hclge_wol_info *wol_info)
11561 struct hclge_wol_cfg_cmd *wol_cfg_cmd;
11562 struct hclge_desc desc;
11565 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
11566 wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
11567 wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
11568 wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
11569 memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);
11571 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11573 dev_err(&hdev->pdev->dev,
11574 "failed to set wol config, ret = %d\n", ret);
11579 static int hclge_update_wol(struct hclge_dev *hdev)
11581 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11583 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11586 return hclge_set_wol_cfg(hdev, wol_info);
11589 static int hclge_init_wol(struct hclge_dev *hdev)
11591 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
11594 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
11597 memset(wol_info, 0, sizeof(struct hclge_wol_info));
11598 ret = hclge_get_wol_supported_mode(hdev,
11599 &wol_info->wol_support_mode);
11601 wol_info->wol_support_mode = 0;
11605 return hclge_update_wol(hdev);
11608 static void hclge_get_wol(struct hnae3_handle *handle,
11609 struct ethtool_wolinfo *wol)
11611 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11613 wol->supported = wol_info->wol_support_mode;
11614 wol->wolopts = wol_info->wol_current_mode;
11615 if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
11616 memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
11619 static int hclge_set_wol(struct hnae3_handle *handle,
11620 struct ethtool_wolinfo *wol)
11622 struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
11623 struct hclge_vport *vport = hclge_get_vport(handle);
11627 wol_mode = wol->wolopts;
11628 if (wol_mode & ~wol_info->wol_support_mode)
11631 wol_info->wol_current_mode = wol_mode;
11632 if (wol_mode & WAKE_MAGICSECURE) {
11633 memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
11634 wol_info->wol_sopass_size = SOPASS_MAX;
11636 wol_info->wol_sopass_size = 0;
11639 ret = hclge_set_wol_cfg(vport->back, wol_info);
11641 wol_info->wol_current_mode = 0;
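/* Bring up the whole PF: PCI, devlink and the command queue first,
 * then capability/spec queries and device configuration, then
 * interrupts, queues and vports, and finally the MAC, VLAN, TM, RSS,
 * flow director and PTP blocks before the service task is scheduled.
 */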
11646 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11648 struct pci_dev *pdev = ae_dev->pdev;
11649 struct hclge_dev *hdev;
11652 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11657 hdev->ae_dev = ae_dev;
11658 hdev->reset_type = HNAE3_NONE_RESET;
11659 hdev->reset_level = HNAE3_FUNC_RESET;
11660 ae_dev->priv = hdev;
11662 /* HW supports 2-layer VLAN */
11663 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11665 mutex_init(&hdev->vport_lock);
11666 spin_lock_init(&hdev->fd_rule_lock);
11667 sema_init(&hdev->reset_sem, 1);
11669 ret = hclge_pci_init(hdev);
11673 ret = hclge_devlink_init(hdev);
11675 goto err_pci_uninit;
11677 /* Firmware command queue initialize */
11678 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
11680 goto err_devlink_uninit;
11682 /* Firmware command initialize */
11683 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
11684 true, hdev->reset_pending);
11686 goto err_cmd_uninit;
11688 ret = hclge_clear_hw_resource(hdev);
11690 goto err_cmd_uninit;
11692 ret = hclge_get_cap(hdev);
11694 goto err_cmd_uninit;
11696 ret = hclge_query_dev_specs(hdev);
11698 dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11700 goto err_cmd_uninit;
11703 ret = hclge_configure(hdev);
11705 dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11706 goto err_cmd_uninit;
11709 ret = hclge_init_msi(hdev);
11711 dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11712 goto err_cmd_uninit;
11715 ret = hclge_misc_irq_init(hdev);
11717 goto err_msi_uninit;
11719 ret = hclge_alloc_tqps(hdev);
11721 dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11722 goto err_msi_irq_uninit;
11725 ret = hclge_alloc_vport(hdev);
11727 goto err_msi_irq_uninit;
11729 ret = hclge_map_tqp(hdev);
11731 goto err_msi_irq_uninit;
11733 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
11734 if (hnae3_dev_phy_imp_supported(hdev))
11735 ret = hclge_update_tp_port_info(hdev);
11737 ret = hclge_mac_mdio_config(hdev);
11740 goto err_msi_irq_uninit;
11743 ret = hclge_init_umv_space(hdev);
11745 goto err_mdiobus_unreg;
11747 ret = hclge_mac_init(hdev);
11749 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11750 goto err_mdiobus_unreg;
11753 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11755 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11756 goto err_mdiobus_unreg;
11759 ret = hclge_config_gro(hdev);
11761 goto err_mdiobus_unreg;
11763 ret = hclge_init_vlan_config(hdev);
11765 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11766 goto err_mdiobus_unreg;
11769 ret = hclge_tm_schd_init(hdev);
11771 dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11772 goto err_mdiobus_unreg;
11775 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
11778 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11779 goto err_mdiobus_unreg;
11782 ret = hclge_rss_init_hw(hdev);
11784 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11785 goto err_mdiobus_unreg;
11788 ret = init_mgr_tbl(hdev);
11790 dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11791 goto err_mdiobus_unreg;
11794 ret = hclge_init_fd_config(hdev);
11796 dev_err(&pdev->dev,
11797 "fd table init fail, ret=%d\n", ret);
11798 goto err_mdiobus_unreg;
11801 ret = hclge_ptp_init(hdev);
11803 goto err_mdiobus_unreg;
11805 ret = hclge_update_port_info(hdev);
11807 goto err_mdiobus_unreg;
11809 INIT_KFIFO(hdev->mac_tnl_log);
11811 hclge_dcb_ops_set(hdev);
11813 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11814 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11816 hclge_clear_all_event_cause(hdev);
11817 hclge_clear_resetting_state(hdev);
11819 /* Log and clear the hw errors that have already occurred */
11820 if (hnae3_dev_ras_imp_supported(hdev))
11821 hclge_handle_occurred_error(hdev);
11823 hclge_handle_all_hns_hw_errors(ae_dev);
11825 /* request a delayed reset for the error recovery, because an immediate
11826 * global reset on a PF would disrupt the pending initialization of other PFs
11828 if (ae_dev->hw_err_reset_req) {
11829 enum hnae3_reset_type reset_level;
11831 reset_level = hclge_get_reset_level(ae_dev,
11832 &ae_dev->hw_err_reset_req);
11833 hclge_set_def_reset_request(ae_dev, reset_level);
11834 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11837 hclge_init_rxd_adv_layout(hdev);
11839 /* Enable MISC vector(vector0) */
11840 hclge_enable_vector(&hdev->misc_vector, true);
11842 ret = hclge_init_wol(hdev);
11844 dev_warn(&pdev->dev,
11845 "failed to wake on lan init, ret = %d\n", ret);
11847 hclge_state_init(hdev);
11848 hdev->last_reset_time = jiffies;
11850 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11851 HCLGE_DRIVER_NAME);
11853 hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11858 if (hdev->hw.mac.phydev)
11859 mdiobus_unregister(hdev->hw.mac.mdio_bus);
11860 err_msi_irq_uninit:
11861 hclge_misc_irq_uninit(hdev);
11863 pci_free_irq_vectors(pdev);
11865 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11866 err_devlink_uninit:
11867 hclge_devlink_uninit(hdev);
11869 pcim_iounmap(pdev, hdev->hw.hw.io_base);
11870 pci_release_regions(pdev);
11871 pci_disable_device(pdev);
11873 mutex_destroy(&hdev->vport_lock);
11877 static void hclge_stats_clear(struct hclge_dev *hdev)
11879 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11880 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
11883 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11885 return hclge_config_switch_param(hdev, vf, enable,
11886 HCLGE_SWITCH_ANTI_SPOOF_MASK);
11889 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11891 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11892 HCLGE_FILTER_FE_NIC_INGRESS_B,
11896 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11900 ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11902 dev_err(&hdev->pdev->dev,
11903 "Set vf %d mac spoof check %s failed, ret=%d\n",
11904 vf, enable ? "on" : "off", ret);
11908 ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11910 dev_err(&hdev->pdev->dev,
11911 "Set vf %d vlan spoof check %s failed, ret=%d\n",
11912 vf, enable ? "on" : "off", ret);
11917 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11920 struct hclge_vport *vport = hclge_get_vport(handle);
11921 struct hclge_dev *hdev = vport->back;
11922 u32 new_spoofchk = enable ? 1 : 0;
11925 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11926 return -EOPNOTSUPP;
11928 vport = hclge_get_vf_vport(hdev, vf);
11932 if (vport->vf_info.spoofchk == new_spoofchk)
11935 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11936 dev_warn(&hdev->pdev->dev,
11937 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11939 else if (enable && hclge_is_umv_space_full(vport, true))
11940 dev_warn(&hdev->pdev->dev,
11941 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11944 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11948 vport->vf_info.spoofchk = new_spoofchk;
11952 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11954 struct hclge_vport *vport = hdev->vport;
11958 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11961 /* resume the vf spoof check state after reset */
11962 for (i = 0; i < hdev->num_alloc_vport; i++) {
11963 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11964 vport->vf_info.spoofchk);
11974 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11976 struct hclge_vport *vport = hclge_get_vport(handle);
11977 struct hclge_dev *hdev = vport->back;
11978 u32 new_trusted = enable ? 1 : 0;
11980 vport = hclge_get_vf_vport(hdev, vf);
11984 if (vport->vf_info.trusted == new_trusted)
11987 vport->vf_info.trusted = new_trusted;
11988 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11989 hclge_task_schedule(hdev, 0);
11994 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11999 /* reset vf rate to default value */
12000 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
12001 struct hclge_vport *vport = &hdev->vport[vf];
12003 vport->vf_info.max_tx_rate = 0;
12004 ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
12006 dev_err(&hdev->pdev->dev,
12007 "vf%d failed to reset to default, ret=%d\n",
12008 vf - HCLGE_VF_VPORT_START_NUM, ret);
12012 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
12013 int min_tx_rate, int max_tx_rate)
12015 if (min_tx_rate != 0 ||
12016 max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
12017 dev_err(&hdev->pdev->dev,
12018 "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
12019 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
12026 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
12027 int min_tx_rate, int max_tx_rate, bool force)
12029 struct hclge_vport *vport = hclge_get_vport(handle);
12030 struct hclge_dev *hdev = vport->back;
12033 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
12037 vport = hclge_get_vf_vport(hdev, vf);
12041 if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
12044 ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
12048 vport->vf_info.max_tx_rate = max_tx_rate;
12053 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
12055 struct hnae3_handle *handle = &hdev->vport->nic;
12056 struct hclge_vport *vport;
12060 /* resume the vf max_tx_rate after reset */
12061 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
12062 vport = hclge_get_vf_vport(hdev, vf);
12066 /* zero means max rate; after reset, the firmware has already set it
12067 * to max rate, so just continue.
12069 if (!vport->vf_info.max_tx_rate)
12072 ret = hclge_set_vf_rate(handle, vf, 0,
12073 vport->vf_info.max_tx_rate, true);
12075 dev_err(&hdev->pdev->dev,
12076 "vf%d failed to resume tx_rate:%u, ret=%d\n",
12077 vf, vport->vf_info.max_tx_rate, ret);
12085 static void hclge_reset_vport_state(struct hclge_dev *hdev)
12087 struct hclge_vport *vport = hdev->vport;
12090 for (i = 0; i < hdev->num_alloc_vport; i++) {
12091 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
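/* Rebuild the hardware state after a reset. For IMP and global resets
 * the VLAN/MAC tables kept in memory are cleared first; then the
 * command queue, queues, MAC, VLAN, TM, RSS, flow director and PTP
 * blocks are reinitialized and the per-VF settings are restored.
 */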
12096 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
12098 struct hclge_dev *hdev = ae_dev->priv;
12099 struct pci_dev *pdev = ae_dev->pdev;
12102 set_bit(HCLGE_STATE_DOWN, &hdev->state);
12104 hclge_stats_clear(hdev);
12105 /* NOTE: a pf reset does not need to clear or restore the pf and vf
12106 * table entries, so the tables in memory should not be cleaned here.
12108 if (hdev->reset_type == HNAE3_IMP_RESET ||
12109 hdev->reset_type == HNAE3_GLOBAL_RESET) {
12110 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
12111 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
12112 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
12113 hclge_reset_umv_space(hdev);
12116 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
12117 true, hdev->reset_pending);
12119 dev_err(&pdev->dev, "Cmd queue init failed\n");
12123 ret = hclge_map_tqp(hdev);
12125 dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
12129 ret = hclge_mac_init(hdev);
12131 dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
12135 ret = hclge_tp_port_init(hdev);
12137 dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
12142 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
12144 dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
12148 ret = hclge_config_gro(hdev);
12152 ret = hclge_init_vlan_config(hdev);
12154 dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
12158 ret = hclge_tm_init_hw(hdev, true);
12160 dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
12164 ret = hclge_rss_init_hw(hdev);
12166 dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
12170 ret = init_mgr_tbl(hdev);
12172 dev_err(&pdev->dev,
12173 "failed to reinit manager table, ret = %d\n", ret);
12177 ret = hclge_init_fd_config(hdev);
12179 dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
12183 ret = hclge_ptp_init(hdev);
12187 /* Log and clear the hw errors that have already occurred */
12188 if (hnae3_dev_ras_imp_supported(hdev))
12189 hclge_handle_occurred_error(hdev);
12191 hclge_handle_all_hns_hw_errors(ae_dev);
12193 /* Re-enable the hw error interrupts because
12194 * the interrupts get disabled on global reset.
12196 ret = hclge_config_nic_hw_error(hdev, true);
12198 dev_err(&pdev->dev,
12199 "fail(%d) to re-enable NIC hw error interrupts\n",
12204 if (hdev->roce_client) {
12205 ret = hclge_config_rocee_ras_interrupt(hdev, true);
12207 dev_err(&pdev->dev,
12208 "fail(%d) to re-enable roce ras interrupts\n",
12214 hclge_reset_vport_state(hdev);
12215 ret = hclge_reset_vport_spoofchk(hdev);
12219 ret = hclge_resume_vf_rate(hdev);
12223 hclge_init_rxd_adv_layout(hdev);
12225 ret = hclge_update_wol(hdev);
12227 dev_warn(&pdev->dev,
12228 "failed to update wol config, ret = %d\n", ret);
12230 dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
12231 HCLGE_DRIVER_NAME);
12236 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
12238 struct hclge_dev *hdev = ae_dev->priv;
12239 struct hclge_mac *mac = &hdev->hw.mac;
12241 hclge_reset_vf_rate(hdev);
12242 hclge_clear_vf_vlan(hdev);
12243 hclge_state_uninit(hdev);
12244 hclge_ptp_uninit(hdev);
12245 hclge_uninit_rxd_adv_layout(hdev);
12246 hclge_uninit_mac_table(hdev);
12247 hclge_del_all_fd_entries(hdev);
12250 mdiobus_unregister(mac->mdio_bus);
12252 /* Disable MISC vector(vector0) */
12253 hclge_enable_vector(&hdev->misc_vector, false);
12254 synchronize_irq(hdev->misc_vector.vector_irq);
12256 /* Disable all hw interrupts */
12257 hclge_config_mac_tnl_int(hdev, false);
12258 hclge_config_nic_hw_error(hdev, false);
12259 hclge_config_rocee_ras_interrupt(hdev, false);
12261 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
12262 hclge_misc_irq_uninit(hdev);
12263 hclge_devlink_uninit(hdev);
12264 hclge_pci_uninit(hdev);
12265 hclge_uninit_vport_vlan_table(hdev);
12266 mutex_destroy(&hdev->vport_lock);
12267 ae_dev->priv = NULL;
12270 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
12272 struct hclge_vport *vport = hclge_get_vport(handle);
12273 struct hclge_dev *hdev = vport->back;
12275 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
12278 static void hclge_get_channels(struct hnae3_handle *handle,
12279 struct ethtool_channels *ch)
12281 ch->max_combined = hclge_get_max_channels(handle);
12282 ch->other_count = 1;
12284 ch->combined_count = handle->kinfo.rss_size;
12287 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
12288 u16 *alloc_tqps, u16 *max_rss_size)
12290 struct hclge_vport *vport = hclge_get_vport(handle);
12291 struct hclge_dev *hdev = vport->back;
12293 *alloc_tqps = vport->alloc_tqps;
12294 *max_rss_size = hdev->pf_rss_size_max;
12297 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
12299 struct hclge_vport *vport = hclge_get_vport(handle);
12300 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
12301 struct hclge_dev *hdev = vport->back;
12302 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
12303 u16 tc_valid[HCLGE_MAX_TC_NUM];
12307 roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
12308 roundup_size = ilog2(roundup_size);
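/* e.g. rss_size = 10: roundup_pow_of_two(10) = 16 and ilog2(16) = 4,
 * so tc_size carries the power-of-two exponent of the per-TC region
 */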
12309 /* Set the RSS TC mode according to the new RSS size */
12310 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
12313 if (!(hdev->hw_tc_map & BIT(i)))
12317 tc_size[i] = roundup_size;
12318 tc_offset[i] = vport->nic.kinfo.rss_size * i;
12321 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
12325 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
12326 bool rxfh_configured)
12328 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
12329 struct hclge_vport *vport = hclge_get_vport(handle);
12330 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
12331 struct hclge_dev *hdev = vport->back;
12332 u16 cur_rss_size = kinfo->rss_size;
12333 u16 cur_tqps = kinfo->num_tqps;
12338 kinfo->req_rss_size = new_tqps_num;
12340 ret = hclge_tm_vport_map_update(hdev);
12342 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
12346 ret = hclge_set_rss_tc_mode_cfg(handle);
12350 /* RSS indirection table has been configured by user */
12351 if (rxfh_configured)
12354 /* Reinitialize the rss indirection table according to the new RSS size */
12355 rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
12360 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
12361 rss_indir[i] = i % kinfo->rss_size;
12363 ret = hclge_set_rss(handle, rss_indir, NULL, 0);
12365 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
12372 dev_info(&hdev->pdev->dev,
12373 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
12374 cur_rss_size, kinfo->rss_size,
12375 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12380 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12381 u32 *regs_num_64_bit)
12383 struct hclge_desc desc;
12387 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12388 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12390 dev_err(&hdev->pdev->dev,
12391 "Query register number cmd failed, ret = %d.\n", ret);
12395 *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12396 *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12398 total_num = *regs_num_32_bit + *regs_num_64_bit;
12405 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12408 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12409 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12411 struct hclge_desc *desc;
12412 u32 *reg_val = data;
12422 nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12423 cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12424 HCLGE_32_BIT_REG_RTN_DATANUM);
12425 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12429 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12430 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12432 dev_err(&hdev->pdev->dev,
12433 "Query 32 bit register cmd failed, ret = %d.\n", ret);
12438 for (i = 0; i < cmd_num; i++) {
12440 desc_data = (__le32 *)(&desc[i].data[0]);
12441 n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12443 desc_data = (__le32 *)(&desc[i]);
12444 n = HCLGE_32_BIT_REG_RTN_DATANUM;
12446 for (k = 0; k < n; k++) {
12447 *reg_val++ = le32_to_cpu(*desc_data++);
12459 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12462 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12463 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12465 struct hclge_desc *desc;
12466 u64 *reg_val = data;
12476 nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12477 cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12478 HCLGE_64_BIT_REG_RTN_DATANUM);
12479 desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12483 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12484 ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12486 dev_err(&hdev->pdev->dev,
12487 "Query 64 bit register cmd failed, ret = %d.\n", ret);
12492 for (i = 0; i < cmd_num; i++) {
12494 desc_data = (__le64 *)(&desc[i].data[0]);
12495 n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12497 desc_data = (__le64 *)(&desc[i]);
12498 n = HCLGE_64_BIT_REG_RTN_DATANUM;
12500 for (k = 0; k < n; k++) {
12501 *reg_val++ = le64_to_cpu(*desc_data++);
12513 #define MAX_SEPARATE_NUM 4
12514 #define SEPARATOR_VALUE 0xFDFCFBFA
12515 #define REG_NUM_PER_LINE 4
12516 #define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
12517 #define REG_SEPARATOR_LINE 1
12518 #define REG_NUM_REMAIN_MASK 3
12520 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12524 /* initialize all command BDs except the last one */
12525 for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12526 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12528 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
12531 /* initialize the last command BD */
12532 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12534 return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
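/* Each DFX register type's BD count is read from a fixed offset in
 * the query result: desc_index = offset / entries_per_desc and
 * index = offset % entries_per_desc select the word holding the count.
 */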
12537 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12541 u32 entries_per_desc, desc_index, index, offset, i;
12542 struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12545 ret = hclge_query_bd_num_cmd_send(hdev, desc);
12547 dev_err(&hdev->pdev->dev,
12548 "Get dfx bd num fail, status is %d.\n", ret);
12552 entries_per_desc = ARRAY_SIZE(desc[0].data);
12553 for (i = 0; i < type_num; i++) {
12554 offset = hclge_dfx_bd_offset_list[i];
12555 index = offset % entries_per_desc;
12556 desc_index = offset / entries_per_desc;
12557 bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12563 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12564 struct hclge_desc *desc_src, int bd_num,
12565 enum hclge_opcode_type cmd)
12567 struct hclge_desc *desc = desc_src;
12570 hclge_cmd_setup_basic_desc(desc, cmd, true);
12571 for (i = 0; i < bd_num - 1; i++) {
12572 desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
12574 hclge_cmd_setup_basic_desc(desc, cmd, true);
12578 ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12580 dev_err(&hdev->pdev->dev,
12581 "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12587 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12590 int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12591 struct hclge_desc *desc = desc_src;
12594 entries_per_desc = ARRAY_SIZE(desc->data);
12595 reg_num = entries_per_desc * bd_num;
12596 separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12597 for (i = 0; i < reg_num; i++) {
12598 index = i % entries_per_desc;
12599 desc_index = i / entries_per_desc;
12600 *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12602 for (i = 0; i < separator_num; i++)
12603 *reg++ = SEPARATOR_VALUE;
12605 return reg_num + separator_num;
12608 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12610 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12611 int data_len_per_desc, bd_num, i;
12616 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12620 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12622 dev_err(&hdev->pdev->dev,
12623 "Get dfx reg bd num fail, status is %d.\n", ret);
12627 data_len_per_desc = sizeof_field(struct hclge_desc, data);
12629 for (i = 0; i < dfx_reg_type_num; i++) {
12630 bd_num = bd_num_list[i];
12631 data_len = data_len_per_desc * bd_num;
12632 *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12636 kfree(bd_num_list);
12640 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12642 u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12643 int bd_num, bd_num_max, buf_len, i;
12644 struct hclge_desc *desc_src;
12649 bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12653 ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12655 dev_err(&hdev->pdev->dev,
12656 "Get dfx reg bd num fail, status is %d.\n", ret);
12660 bd_num_max = bd_num_list[0];
12661 for (i = 1; i < dfx_reg_type_num; i++)
12662 bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12664 buf_len = sizeof(*desc_src) * bd_num_max;
12665 desc_src = kzalloc(buf_len, GFP_KERNEL);
12671 for (i = 0; i < dfx_reg_type_num; i++) {
12672 bd_num = bd_num_list[i];
12673 ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12674 hclge_dfx_reg_opcode_list[i]);
12676 dev_err(&hdev->pdev->dev,
12677 "Get dfx reg fail, status is %d.\n", ret);
12681 reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12686 kfree(bd_num_list);
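/* Dump the directly readable PF registers in a fixed order: cmdq
 * regs, common regs, per-ring regs (stride HCLGE_RING_REG_OFFSET) and
 * per-vector interrupt regs (stride HCLGE_RING_INT_REG_OFFSET), each
 * group padded with SEPARATOR_VALUE up to a whole register line.
 */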
12690 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12691 struct hnae3_knic_private_info *kinfo)
12693 #define HCLGE_RING_REG_OFFSET 0x200
12694 #define HCLGE_RING_INT_REG_OFFSET 0x4
12696 int i, j, reg_num, separator_num;
12700 /* fetch per-PF register values from the PF PCIe register space */
12701 reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12702 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12703 for (i = 0; i < reg_num; i++)
12704 *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12705 for (i = 0; i < separator_num; i++)
12706 *reg++ = SEPARATOR_VALUE;
12707 data_num_sum = reg_num + separator_num;
12709 reg_num = ARRAY_SIZE(common_reg_addr_list);
12710 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12711 for (i = 0; i < reg_num; i++)
12712 *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12713 for (i = 0; i < separator_num; i++)
12714 *reg++ = SEPARATOR_VALUE;
12715 data_num_sum += reg_num + separator_num;
12717 reg_num = ARRAY_SIZE(ring_reg_addr_list);
12718 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12719 for (j = 0; j < kinfo->num_tqps; j++) {
12720 for (i = 0; i < reg_num; i++)
12721 *reg++ = hclge_read_dev(&hdev->hw,
12722 ring_reg_addr_list[i] +
12723 HCLGE_RING_REG_OFFSET * j);
12724 for (i = 0; i < separator_num; i++)
12725 *reg++ = SEPARATOR_VALUE;
12727 data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12729 reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12730 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12731 for (j = 0; j < hdev->num_msi_used - 1; j++) {
12732 for (i = 0; i < reg_num; i++)
12733 *reg++ = hclge_read_dev(&hdev->hw,
12734 tqp_intr_reg_addr_list[i] +
12735 HCLGE_RING_INT_REG_OFFSET * j);
12736 for (i = 0; i < separator_num; i++)
12737 *reg++ = SEPARATOR_VALUE;
12739 data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12741 return data_num_sum;
12744 static int hclge_get_regs_len(struct hnae3_handle *handle)
12746 int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12747 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12748 struct hclge_vport *vport = hclge_get_vport(handle);
12749 struct hclge_dev *hdev = vport->back;
12750 int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12751 int regs_lines_32_bit, regs_lines_64_bit;
12754 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12756 dev_err(&hdev->pdev->dev,
12757 "Get register number failed, ret = %d.\n", ret);
12761 ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12763 dev_err(&hdev->pdev->dev,
12764 "Get dfx reg len failed, ret = %d.\n", ret);
12768 cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12769 REG_SEPARATOR_LINE;
12770 common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12771 REG_SEPARATOR_LINE;
12772 ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12773 REG_SEPARATOR_LINE;
12774 tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12775 REG_SEPARATOR_LINE;
12776 regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12777 REG_SEPARATOR_LINE;
12778 regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12779 REG_SEPARATOR_LINE;
12781 return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12782 tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12783 regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12786 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12789 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12790 struct hclge_vport *vport = hclge_get_vport(handle);
12791 struct hclge_dev *hdev = vport->back;
12792 u32 regs_num_32_bit, regs_num_64_bit;
12793 int i, reg_num, separator_num, ret;
12796 *version = hdev->fw_version;
12798 ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12800 dev_err(&hdev->pdev->dev,
12801 "Get register number failed, ret = %d.\n", ret);
12805 reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12807 ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12809 dev_err(&hdev->pdev->dev,
12810 "Get 32 bit register failed, ret = %d.\n", ret);
12813 reg_num = regs_num_32_bit;
12815 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12816 for (i = 0; i < separator_num; i++)
12817 *reg++ = SEPARATOR_VALUE;
12819 ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12821 dev_err(&hdev->pdev->dev,
12822 "Get 64 bit register failed, ret = %d.\n", ret);
12825 reg_num = regs_num_64_bit * 2;
12827 separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12828 for (i = 0; i < separator_num; i++)
12829 *reg++ = SEPARATOR_VALUE;
12831 ret = hclge_get_dfx_reg(hdev, reg);
12833 dev_err(&hdev->pdev->dev,
12834 "Get dfx register failed, ret = %d.\n", ret);
12837 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12839 struct hclge_set_led_state_cmd *req;
12840 struct hclge_desc desc;
12843 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12845 req = (struct hclge_set_led_state_cmd *)desc.data;
12846 hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12847 HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12849 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12851 dev_err(&hdev->pdev->dev,
12852 "Send set led state cmd error, ret =%d\n", ret);
12857 enum hclge_led_status {
12860 HCLGE_LED_NO_CHANGE = 0xFF,
12863 static int hclge_set_led_id(struct hnae3_handle *handle,
12864 enum ethtool_phys_id_state status)
12866 struct hclge_vport *vport = hclge_get_vport(handle);
12867 struct hclge_dev *hdev = vport->back;
12870 case ETHTOOL_ID_ACTIVE:
12871 return hclge_set_led_status(hdev, HCLGE_LED_ON);
12872 case ETHTOOL_ID_INACTIVE:
12873 return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12879 static void hclge_get_link_mode(struct hnae3_handle *handle,
12880 unsigned long *supported,
12881 unsigned long *advertising)
12883 unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12884 struct hclge_vport *vport = hclge_get_vport(handle);
12885 struct hclge_dev *hdev = vport->back;
12886 unsigned int idx = 0;
12888 for (; idx < size; idx++) {
12889 supported[idx] = hdev->hw.mac.supported[idx];
12890 advertising[idx] = hdev->hw.mac.advertising[idx];
12894 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12896 struct hclge_vport *vport = hclge_get_vport(handle);
12897 struct hclge_dev *hdev = vport->back;
12898 bool gro_en_old = hdev->gro_en;
12901 hdev->gro_en = enable;
12902 ret = hclge_config_gro(hdev);
12904 hdev->gro_en = gro_en_old;
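/* Apply the pending promiscuous configuration for one vport: the PF
 * combines its netdev flags with the overflow promisc flags, while a
 * VF is only granted the requested unicast/multicast promisc mode if
 * it is trusted (overflow promisc can force it on as well).
 */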
12909 static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
12911 struct hnae3_handle *handle = &vport->nic;
12912 struct hclge_dev *hdev = vport->back;
12913 bool uc_en = false;
12914 bool mc_en = false;
12919 if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12920 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12921 vport->last_promisc_flags = vport->overflow_promisc_flags;
12924 if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12929 if (!vport->vport_id) {
12930 tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12931 ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12932 tmp_flags & HNAE3_MPE);
12934 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12937 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12943 if (vport->vf_info.trusted) {
12944 uc_en = vport->vf_info.request_uc_en > 0 ||
12945 vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
12946 mc_en = vport->vf_info.request_mc_en > 0 ||
12947 vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
12949 bc_en = vport->vf_info.request_bc_en > 0;
12951 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12954 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12957 hclge_set_vport_vlan_fltr_change(vport);
12962 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12964 struct hclge_vport *vport;
12968 for (i = 0; i < hdev->num_alloc_vport; i++) {
12969 vport = &hdev->vport[i];
12971 ret = hclge_sync_vport_promisc_mode(vport);
12977 static bool hclge_module_existed(struct hclge_dev *hdev)
12979 struct hclge_desc desc;
12983 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12984 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12986 dev_err(&hdev->pdev->dev,
12987 "failed to get SFP exist state, ret = %d\n", ret);
12991 existed = le32_to_cpu(desc.data[0]);
12993 return existed != 0;
12996 /* need 6 bds (140 bytes in total) in one read;
12997 * return the number of bytes actually read, 0 means the read failed.
12999 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
13002 struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
13003 struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
13009 /* setup all 6 bds to read module eeprom info. */
13010 for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
13011 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
13014 /* bd0~bd4 need next flag */
13015 if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
13016 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
13019 /* setup bd0, this bd contains offset and read length. */
13020 sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
13021 sfp_info_bd0->offset = cpu_to_le16((u16)offset);
13022 read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
13023 sfp_info_bd0->read_len = cpu_to_le16(read_len);
13025 ret = hclge_cmd_send(&hdev->hw, desc, i);
13027 dev_err(&hdev->pdev->dev,
13028 "failed to get SFP eeprom info, ret = %d\n", ret);
13032 /* copy sfp info from bd0 to out buffer. */
13033 copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
13034 memcpy(data, sfp_info_bd0->data, copy_len);
13035 read_len = copy_len;
13037 /* copy sfp info from bd1~bd5 to out buffer if needed. */
13038 for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
13039 if (read_len >= len)
13042 copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
13043 memcpy(data + read_len, desc[i].data, copy_len);
13044 read_len += copy_len;
13050 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
13053 struct hclge_vport *vport = hclge_get_vport(handle);
13054 struct hclge_dev *hdev = vport->back;
13058 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
13059 return -EOPNOTSUPP;
13061 if (!hclge_module_existed(hdev))
13064 while (read_len < len) {
13065 data_len = hclge_get_sfp_eeprom_info(hdev,
13072 read_len += data_len;
13078 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
13081 struct hclge_vport *vport = hclge_get_vport(handle);
13082 struct hclge_dev *hdev = vport->back;
13083 struct hclge_desc desc;
13086 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
13087 return -EOPNOTSUPP;
13089 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
13090 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
13092 dev_err(&hdev->pdev->dev,
13093 "failed to query link diagnosis info, ret = %d\n", ret);
13097 *status_code = le32_to_cpu(desc.data[0]);
13101 /* After sriov is disabled, the VF still has some config and info that
13102 * was configured by the PF and needs to be cleaned up.
13104 static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
13106 struct hclge_dev *hdev = vport->back;
13107 struct hclge_vlan_info vlan_info;
13110 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
13111 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
13112 vport->need_notify = 0;
13115 /* after sriov is disabled, clean the VF rate configured by the PF */
13116 ret = hclge_tm_qs_shaper_cfg(vport, 0);
13118 dev_err(&hdev->pdev->dev,
13119 "failed to clean vf%d rate config, ret = %d\n",
13122 vlan_info.vlan_tag = 0;
13124 vlan_info.vlan_proto = ETH_P_8021Q;
13125 ret = hclge_update_port_base_vlan_cfg(vport,
13126 HNAE3_PORT_BASE_VLAN_DISABLE,
13129 dev_err(&hdev->pdev->dev,
13130 "failed to clean vf%d port base vlan, ret = %d\n",
13133 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
13135 dev_err(&hdev->pdev->dev,
13136 "failed to clean vf%d spoof config, ret = %d\n",
13139 memset(&vport->vf_info, 0, sizeof(vport->vf_info));
static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
        struct hclge_dev *hdev = ae_dev->priv;
        struct hclge_vport *vport;
        int i;

        for (i = 0; i < num_vfs; i++) {
                vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

                hclge_clear_vport_vf_info(vport, i);
        }
}
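/* Hooked up as .clean_vf_config below; the hnae3 client is expected to call
 * it while SR-IOV is being disabled, with num_vfs being the count that was
 * originally enabled. vport[0] belongs to the PF itself, hence the
 * HCLGE_VF_VPORT_START_NUM offset when indexing VF vports.
 */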
static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
                               u8 *priority)
{
        struct hclge_vport *vport = hclge_get_vport(h);

        if (dscp >= HNAE3_MAX_DSCP)
                return -EINVAL;

        if (tc_mode)
                *tc_mode = vport->nic.kinfo.tc_map_mode;
        if (priority)
                *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
                            vport->nic.kinfo.dscp_prio[dscp];

        return 0;
}
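/* An unmapped DSCP value reads back as priority 0 rather than leaking the
 * HNAE3_PRIO_ID_INVALID sentinel to callers; tc_mode tells the caller
 * whether the port currently maps traffic to TCs by packet priority or by
 * DSCP.
 */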
static const struct hnae3_ae_ops hclge_ops = {
        .init_ae_dev = hclge_init_ae_dev,
        .uninit_ae_dev = hclge_uninit_ae_dev,
        .reset_prepare = hclge_reset_prepare_general,
        .reset_done = hclge_reset_done,
        .init_client_instance = hclge_init_client_instance,
        .uninit_client_instance = hclge_uninit_client_instance,
        .map_ring_to_vector = hclge_map_ring_to_vector,
        .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
        .get_vector = hclge_get_vector,
        .put_vector = hclge_put_vector,
        .set_promisc_mode = hclge_set_promisc_mode,
        .request_update_promisc_mode = hclge_request_update_promisc_mode,
        .set_loopback = hclge_set_loopback,
        .start = hclge_ae_start,
        .stop = hclge_ae_stop,
        .client_start = hclge_client_start,
        .client_stop = hclge_client_stop,
        .get_status = hclge_get_status,
        .get_ksettings_an_result = hclge_get_ksettings_an_result,
        .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
        .get_media_type = hclge_get_media_type,
        .check_port_speed = hclge_check_port_speed,
        .get_fec_stats = hclge_get_fec_stats,
        .get_fec = hclge_get_fec,
        .set_fec = hclge_set_fec,
        .get_rss_key_size = hclge_comm_get_rss_key_size,
        .get_rss = hclge_get_rss,
        .set_rss = hclge_set_rss,
        .set_rss_tuple = hclge_set_rss_tuple,
        .get_rss_tuple = hclge_get_rss_tuple,
        .get_tc_size = hclge_get_tc_size,
        .get_mac_addr = hclge_get_mac_addr,
        .set_mac_addr = hclge_set_mac_addr,
        .do_ioctl = hclge_do_ioctl,
        .add_uc_addr = hclge_add_uc_addr,
        .rm_uc_addr = hclge_rm_uc_addr,
        .add_mc_addr = hclge_add_mc_addr,
        .rm_mc_addr = hclge_rm_mc_addr,
        .set_autoneg = hclge_set_autoneg,
        .get_autoneg = hclge_get_autoneg,
        .restart_autoneg = hclge_restart_autoneg,
        .halt_autoneg = hclge_halt_autoneg,
        .get_pauseparam = hclge_get_pauseparam,
        .set_pauseparam = hclge_set_pauseparam,
        .set_mtu = hclge_set_mtu,
        .reset_queue = hclge_reset_tqp,
        .get_stats = hclge_get_stats,
        .get_mac_stats = hclge_get_mac_stat,
        .update_stats = hclge_update_stats,
        .get_strings = hclge_get_strings,
        .get_sset_count = hclge_get_sset_count,
        .get_fw_version = hclge_get_fw_version,
        .get_mdix_mode = hclge_get_mdix_mode,
        .enable_vlan_filter = hclge_enable_vlan_filter,
        .set_vlan_filter = hclge_set_vlan_filter,
        .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
        .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
        .reset_event = hclge_reset_event,
        .get_reset_level = hclge_get_reset_level,
        .set_default_reset_request = hclge_set_def_reset_request,
        .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
        .set_channels = hclge_set_channels,
        .get_channels = hclge_get_channels,
        .get_regs_len = hclge_get_regs_len,
        .get_regs = hclge_get_regs,
        .set_led_id = hclge_set_led_id,
        .get_link_mode = hclge_get_link_mode,
        .add_fd_entry = hclge_add_fd_entry,
        .del_fd_entry = hclge_del_fd_entry,
        .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
        .get_fd_rule_info = hclge_get_fd_rule_info,
        .get_fd_all_rules = hclge_get_all_rules,
        .enable_fd = hclge_enable_fd,
        .add_arfs_entry = hclge_add_fd_entry_by_arfs,
        .dbg_read_cmd = hclge_dbg_read_cmd,
        .handle_hw_ras_error = hclge_handle_hw_ras_error,
        .get_hw_reset_stat = hclge_get_hw_reset_stat,
        .ae_dev_resetting = hclge_ae_dev_resetting,
        .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
        .set_gro_en = hclge_gro_en,
        .get_global_queue_id = hclge_covert_handle_qid_global,
        .set_timer_task = hclge_set_timer_task,
        .mac_connect_phy = hclge_mac_connect_phy,
        .mac_disconnect_phy = hclge_mac_disconnect_phy,
        .get_vf_config = hclge_get_vf_config,
        .set_vf_link_state = hclge_set_vf_link_state,
        .set_vf_spoofchk = hclge_set_vf_spoofchk,
        .set_vf_trust = hclge_set_vf_trust,
        .set_vf_rate = hclge_set_vf_rate,
        .set_vf_mac = hclge_set_vf_mac,
        .get_module_eeprom = hclge_get_module_eeprom,
        .get_cmdq_stat = hclge_get_cmdq_stat,
        .add_cls_flower = hclge_add_cls_flower,
        .del_cls_flower = hclge_del_cls_flower,
        .cls_flower_active = hclge_is_cls_flower_active,
        .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
        .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
        .set_tx_hwts_info = hclge_ptp_set_tx_info,
        .get_rx_hwts = hclge_ptp_get_rx_hwts,
        .get_ts_info = hclge_ptp_get_ts_info,
        .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
        .clean_vf_config = hclge_clean_vport_config,
        .get_dscp_prio = hclge_get_dscp_prio,
        .get_wol = hclge_get_wol,
        .set_wol = hclge_set_wol,
};
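/* This table is the PF half of the hnae3_ae_ops contract; the VF driver
 * (hclgevf) provides its own table for the same interface, with the
 * PF-only hooks left out.
 */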
static struct hnae3_ae_algo ae_algo = {
        .ops = &hclge_ops,
        .pdev_id_table = ae_algo_pci_tbl,
};
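/* When a device matching ae_algo_pci_tbl probes, the hnae3 core pairs it
 * with this algo and drives it through the ops table above, starting with
 * .init_ae_dev.
 */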
static int __init hclge_init(void)
{
        pr_info("%s is initializing\n", HCLGE_NAME);

        hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
        if (!hclge_wq) {
                pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
                return -ENOMEM;
        }

        hnae3_register_ae_algo(&ae_algo);

        return 0;
}
static void __exit hclge_exit(void)
{
        hnae3_unregister_ae_algo_prepare(&ae_algo);
        hnae3_unregister_ae_algo(&ae_algo);
        destroy_workqueue(hclge_wq);
}
module_init(hclge_init);
module_exit(hclge_exit);
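/* Teardown mirrors init in reverse order: the algo is unregistered (and
 * bound devices quiesced) before the workqueue that services their deferred
 * tasks is destroyed, so no work item can race with a vanishing queue.
 */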
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);