// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"
#define HCLGE_NAME			"hclge"

#define HCLGE_BUF_SIZE_UNIT		256U
#define HCLGE_BUF_MUL_BY		2
#define HCLGE_BUF_DIV_BY		2
#define NEED_RESERVE_TC_NUM		2
#define BUF_MAX_PERCENT			100
#define BUF_RESERVE_PERCENT		90

#define HCLGE_RESET_MAX_FAIL_CNT	5
#define HCLGE_RESET_SYNC_TIME		100
#define HCLGE_PF_RESET_SYNC_TIME	20
#define HCLGE_PF_RESET_SYNC_CNT		1500

#define HCLGE_LINK_STATUS_MS		10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
						   unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
				      int wait_cnt);

static struct hnae3_ae_algo ae_algo;

static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
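
/* The PCI device ID table above lists every HNAE3 PF variant this driver
 * claims; MODULE_DEVICE_TABLE() exports it so userspace can autoload the
 * module when a matching device appears on the bus.
 */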
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App      Loopback test",
	"Serdes   serial Loopback test",
	"Serdes   parallel Loopback test",
	"Phy      Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
		HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};
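
/* Each tuple_key_info entry below describes one flow-director key field:
 * the tuple id, its width in bits, how the key bytes are encoded
 * (KEY_OPT_*), and the offsets of the value and mask inside struct
 * hclge_fd_rule; -1 means the driver never reads that tuple from a rule.
 */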
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it sends the
 * descriptors to firmware and cleans the queue once they complete.
 **/
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}
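
/* MAC statistics are fetched from firmware in one of two ways: older
 * firmware uses a fixed 21-descriptor layout (the "defective" path below),
 * while newer firmware reports how many stats registers it implements and
 * the driver sizes the descriptor array to match (the "complete" path).
 */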
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
	__le64 *desc_data;
	u32 data_size;
	int ret;
	u32 i;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Get MAC pkt stats fail, status = %d.\n", ret);
		return ret;
	}

	/* The first desc has a 64-bit header, so the data size needs to be
	 * reduced by one.
	 */
	data_size = sizeof(desc) / (sizeof(u64)) - 1;

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is contiguous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC		4

	u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *data = (u64 *)(&hdev->mac_stats);
	struct hclge_desc *desc;
	__le64 *desc_data;
	u32 data_size;
	u32 desc_num;
	int ret;
	u32 i;

	/* The first desc has a 64-bit header, so need to consider it */
	desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

	/* This may be called inside atomic sections,
	 * so GFP_ATOMIC is more suitable here
	 */
	desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
	ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
	if (ret) {
		kfree(desc);
		return ret;
	}

	data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

	desc_data = (__le64 *)(&desc[0].data[0]);
	for (i = 0; i < data_size; i++) {
		/* data memory is contiguous because only the first desc has a
		 * header in this command
		 */
		*data += le64_to_cpu(*desc_data);
		data++;
		desc_data++;
	}

	kfree(desc);

	return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* Driver needs the total register number of both valid registers and
	 * reserved registers, but the old firmware only returns the number
	 * of valid registers in device V2. To be compatible with these
	 * devices, the driver uses a fixed value.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (*reg_num == 0) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}
int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	/* The firmware supports the new statistics acquisition method */
	if (hdev->ae_dev->dev_specs.mac_stats_num)
		return hclge_mac_update_stats_complete(hdev);
	else
		return hclge_mac_update_stats_defective(hdev);
}
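
/* Entries in g_mac_stats_string carry the minimum stats register count
 * (V1 or V2) needed to expose them; the helpers below skip any entry whose
 * stats_num exceeds what the firmware reported, so older hardware only
 * shows the counters it actually implements.
 */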
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	u32 i;
	int count = 0;

	for (i = 0; i < size; i++)
		if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
			count += 1;

	return count;
}
static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u64 *buf = data;
	u32 i;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		*buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
		buf++;
	}

	return buf;
}
static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				  const struct hclge_comm_stats_str strs[],
				  int size, u8 *data)
{
	char *buff = (char *)data;
	u32 i;

	if (stringset != ETH_SS_STATS)
		return buff;

	for (i = 0; i < size; i++) {
		if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
			continue;

		snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
		buff = buff + ETH_GSTRING_LEN;
	}

	return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle;
	int status;

	handle = &hdev->vport[0].nic;
	if (handle->client) {
		status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				status);
		}
	}

	hclge_update_fec_stats(hdev);

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	status = hclge_mac_update_stats(hdev);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			status);

	status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			status);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode is supported
	 * serdes: all mac modes are supported, including GE/XGE/LGE/CGE
	 * phy: only supported when a phy device exists on the board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 *p = (char *)data;
	int size;

	if (stringset == ETH_SS_STATS) {
		size = ARRAY_SIZE(g_mac_stats_string);
		p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
					   size, p);
		p = hclge_comm_tqps_get_strings(handle, p);
	} else if (stringset == ETH_SS_TEST) {
		if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
			memcpy(p,
			       hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
			memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
	}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *p;

	p = hclge_comm_get_stats(hdev, g_mac_stats_string,
				 ARRAY_SIZE(g_mac_stats_string), data);
	p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
			       struct hns3_mac_stats *mac_stats)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	hclge_update_stats(handle);

	mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
	mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK	0xF

	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* Set the pf to main pf */
	if (status->pf_state & HCLGE_PF_STATE_MAIN)
		hdev->flag |= HCLGE_FLAG_MAIN;
	else
		hdev->flag &= ~HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
	return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT	5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check if pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
	struct hclge_pf_res_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query pf resource failed %d.\n", ret);
		return ret;
	}

	req = (struct hclge_pf_res_cmd *)desc.data;
	hdev->num_tqps = le16_to_cpu(req->tqp_num) +
			 le16_to_cpu(req->ext_tqp_num);
	hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

	if (req->tx_buf_size)
		hdev->tx_buf_size =
			le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

	if (req->dv_buf_size)
		hdev->dv_buf_size =
			le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
	else
		hdev->dv_buf_size = HCLGE_DEFAULT_DV;

	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

	hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
	if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
		dev_err(&hdev->pdev->dev,
			"only %u msi resources available, not enough for pf(min:2).\n",
			hdev->num_nic_msi);
		return -EINVAL;
	}

	if (hnae3_dev_roce_supported(hdev)) {
		hdev->num_roce_msi =
			le16_to_cpu(req->pf_intr_vector_number_roce);

		/* PF should have NIC vectors and Roce vectors,
		 * NIC vectors are queued before Roce vectors.
		 */
		hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
	} else {
		hdev->num_msi = hdev->num_nic_msi;
	}

	return 0;
}
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	switch (speed_cmd) {
	case HCLGE_FW_MAC_SPEED_10M:
		*speed = HCLGE_MAC_SPEED_10M;
		break;
	case HCLGE_FW_MAC_SPEED_100M:
		*speed = HCLGE_MAC_SPEED_100M;
		break;
	case HCLGE_FW_MAC_SPEED_1G:
		*speed = HCLGE_MAC_SPEED_1G;
		break;
	case HCLGE_FW_MAC_SPEED_10G:
		*speed = HCLGE_MAC_SPEED_10G;
		break;
	case HCLGE_FW_MAC_SPEED_25G:
		*speed = HCLGE_MAC_SPEED_25G;
		break;
	case HCLGE_FW_MAC_SPEED_40G:
		*speed = HCLGE_MAC_SPEED_40G;
		break;
	case HCLGE_FW_MAC_SPEED_50G:
		*speed = HCLGE_MAC_SPEED_50G;
		break;
	case HCLGE_FW_MAC_SPEED_100G:
		*speed = HCLGE_MAC_SPEED_100G;
		break;
	case HCLGE_FW_MAC_SPEED_200G:
		*speed = HCLGE_MAC_SPEED_200G;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};
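
/* speed_bit_map translates a speed in Mbps into the firmware ability bit,
 * letting hclge_check_port_speed() validate a requested speed against
 * hdev->hw.mac.speed_ability with a simple mask test.
 */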
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	u16 i;

	for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
		if (speed == speed_bit_map[i].speed) {
			*speed_bit = speed_bit_map[i].speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_ability = hdev->hw.mac.speed_ability;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	if (speed_bit & speed_ability)
		return 0;

	return -EINVAL;
}
static void hclge_update_fec_support(struct hclge_mac *mac)
{
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

	if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_RS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
				 mac->supported);
	if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
		linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
				 mac->supported);
}
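
/* The hclge_convert_setting_*() helpers below expand the firmware speed
 * ability bitmap into ethtool link modes for a given media class:
 * SR/LR/CR cover fiber and copper-cable modes, KR covers backplane modes.
 */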
static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
				 link_mode);
}
static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(
			ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
			link_mode);
}
static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
				 link_mode);
}
static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
				 link_mode);
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
				 link_mode);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
	/* If firmware has reported fec_ability, don't need to convert by speed */
	if (mac->fec_ability)
		goto out;

	switch (mac->speed) {
	case HCLGE_MAC_SPEED_10G:
	case HCLGE_MAC_SPEED_40G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_25G:
	case HCLGE_MAC_SPEED_50G:
		mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
				   BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_100G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_NONE);
		break;
	case HCLGE_MAC_SPEED_200G:
		mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
				   BIT(HNAE3_FEC_LLRS);
		break;
	default:
		mac->fec_ability = 0;
		break;
	}

out:
	hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speeds for a GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	u8 media_type = hdev->hw.mac.media_type;

	if (media_type == HNAE3_MEDIA_TYPE_FIBER)
		hclge_parse_fiber_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
		hclge_parse_copper_link_mode(hdev, speed_ability);
	else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
		hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
	if (speed_ability & HCLGE_SUPPORT_200G_BIT)
		return HCLGE_MAC_SPEED_200G;

	if (speed_ability & HCLGE_SUPPORT_100G_BIT)
		return HCLGE_MAC_SPEED_100G;

	if (speed_ability & HCLGE_SUPPORT_50G_BIT)
		return HCLGE_MAC_SPEED_50G;

	if (speed_ability & HCLGE_SUPPORT_40G_BIT)
		return HCLGE_MAC_SPEED_40G;

	if (speed_ability & HCLGE_SUPPORT_25G_BIT)
		return HCLGE_MAC_SPEED_25G;

	if (speed_ability & HCLGE_SUPPORT_10G_BIT)
		return HCLGE_MAC_SPEED_10G;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		return HCLGE_MAC_SPEED_1G;

	if (speed_ability & HCLGE_SUPPORT_100M_BIT)
		return HCLGE_MAC_SPEED_100M;

	if (speed_ability & HCLGE_SUPPORT_10M_BIT)
		return HCLGE_MAC_SPEED_10M;

	return HCLGE_MAC_SPEED_1G;
}
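
/* The static configuration is returned as an array of 32-bit param words
 * spread over two descriptors; hclge_parse_cfg() below decodes each field
 * with the hnae3_get_field() mask/shift helpers.
 */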
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT	4096
#define SPEED_ABILITY_EXT_SHIFT		8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M stores the log2 of the PF max rss size
	 * rather than the size itself, which is more flexible for future
	 * changes and expansions. When the PF field is 0, PF and VF share
	 * the same max rss size, taken from the HCLGE_CFG_RSS_SIZE_S field.
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from the
	 * configuration file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes,
	 * so a conversion is needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
	struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
	struct hclge_cfg_param_cmd *req;
	unsigned int i;
	int ret;

	for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
		u32 offset = 0;

		req = (struct hclge_cfg_param_cmd *)desc[i].data;
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
					   true);
		hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
				HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
		/* Len is in units of 4 bytes when sent to hardware */
		hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
				HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
		req->offset = cpu_to_le32(offset);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
	if (ret) {
		dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
		return ret;
	}

	hclge_parse_cfg(hcfg, desc);

	return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM	8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}
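
/* Device specs come back in HCLGE_QUERY_DEV_SPECS_BD_NUM chained
 * descriptors: every descriptor but the last sets
 * HCLGE_COMM_CMD_FLAG_NEXT so the firmware treats them as one query.
 */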
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
	int ret;
	int i;

	ret = hclge_query_mac_stats_num(hdev);
	if (ret)
		return ret;

	/* set default specifications as devices lower than version V3 do not
	 * support querying specifications from firmware.
	 */
	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
		hclge_set_default_dev_specs(hdev);
		return 0;
	}

	for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
					   true);
		desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}
	hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
	if (ret)
		return ret;

	hclge_parse_dev_specs(hdev, desc);
	hclge_check_dev_specs(hdev);

	return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	/* get pf resource */
	return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* the minimal number of queue pairs equals the number of vports */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static void hclge_init_tc_config(struct hclge_dev *hdev)
{
	unsigned int i;

	if (hdev->tc_max > HNAE3_MAX_TC ||
	    hdev->tc_max < 1) {
		dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
			 hdev->tc_max);
		hdev->tc_max = 1;
	}

	/* Dev does not support DCB */
	if (!hnae3_dev_dcb_supported(hdev)) {
		hdev->tc_max = 1;
		hdev->pfc_max = 0;
	} else {
		hdev->pfc_max = hdev->tc_max;
	}

	hdev->tm_info.num_tc = 1;

	/* Non-contiguous TCs are currently not supported */
	for (i = 0; i < hdev->tm_info.num_tc; i++)
		hnae3_set_bit(hdev->hw_tc_map, i, 1);

	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return 0;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *req;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);

	req = (struct hclge_cfg_tso_status_cmd *)desc.data;
	req->tso_mss_min = cpu_to_le16(tso_mss_min);
	req->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev)
{
	struct hclge_cfg_gro_status_cmd *req;
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
	req = (struct hclge_cfg_gro_status_cmd *)desc.data;

	req->gro_en = hdev->gro_en ? 1 : 0;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"GRO hardware config cmd failed, ret = %d\n", ret);

	return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_comm_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclge_comm_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algo;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.tx_desc_num = hdev->num_tx_desc;
		tqp->q.rx_desc_num = hdev->num_rx_desc;

		/* need an extended offset to configure queues >=
		 * HCLGE_TQP_MAX_SIZE_DEV_V2
		 */
		if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 i * HCLGE_TQP_REG_SIZE;
		else
			tqp->q.io_base = hdev->hw.hw.io_base +
					 HCLGE_TQP_REG_OFFSET +
					 HCLGE_TQP_EXT_REG_OFFSET +
					 (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
					 HCLGE_TQP_REG_SIZE;

		/* when the device supports tx push and has device memory,
		 * the queue can execute push mode or doorbell mode on
		 * device memory.
		 */
		if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
			tqp->q.mem_base = hdev->hw.hw.mem_base +
					  HCLGE_TQP_MEM_OFFSET(hdev, i);

		tqp++;
	}

	return 0;
}
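
/* hclge_map_tqps_to_func() below binds one physical queue pair (tqp_pid)
 * to a function-local virtual queue id (tqp_vid); the map-type bit
 * distinguishes VF mappings from PF ones.
 */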
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	req = (struct hclge_tqp_map_cmd *)desc.data;
	req->tqp_id = cpu_to_le16(tqp_pid);
	req->tqp_vf = func_id;
	req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	req->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i, alloced;

	for (i = 0, alloced = 0; i < hdev->num_tqps &&
	     alloced < num_tqps; i++) {
		if (!hdev->htqp[i].alloced) {
			hdev->htqp[i].q.handle = &vport->nic;
			hdev->htqp[i].q.tqp_index = alloced;
			hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
			hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
			kinfo->tqp[alloced] = &hdev->htqp[i].q;
			hdev->htqp[i].alloced = true;
			alloced++;
		}
	}
	vport->alloc_tqps = alloced;
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ensure one to one mapping between irq and queue by default */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;

	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
				  struct hclge_vport *vport)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 i;

	kinfo = &nic->kinfo;
	for (i = 0; i < vport->alloc_tqps; i++) {
		struct hclge_comm_tqp *q =
			container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
		bool is_pf;
		int ret;

		is_pf = !(vport->vport_id);
		ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
					     i, is_pf);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, num_vport;

	num_vport = hdev->num_req_vfs + 1;
	for (i = 0; i < num_vport; i++) {
		int ret;

		ret = hclge_map_tqp_to_vport(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_handle *nic = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	int ret;

	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for the main NIC of the PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->port_base_vlan_cfg.tbl_sta = true;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
				   struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT	7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK	BIT(15)
	struct hclge_tx_buff_alloc_cmd *req;
	struct hclge_desc desc;
	int ret;
	u8 i;

	req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

		req->tx_pkt_buff[i] =
			cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
				    HCLGE_BUF_SIZE_UPDATE_EN_MSK);
	}

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
			ret);

	return ret;
}
1862 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1863 struct hclge_pkt_buf_alloc *buf_alloc)
1865 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1868 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1873 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1878 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1879 if (hdev->hw_tc_map & BIT(i))
/* Get the number of PFC-enabled TCs that have a private buffer */
1885 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1886 struct hclge_pkt_buf_alloc *buf_alloc)
1888 struct hclge_priv_buf *priv;
1892 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1893 priv = &buf_alloc->priv_buf[i];
1894 if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
/* Get the number of PFC-disabled TCs that have a private buffer */
1903 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1904 struct hclge_pkt_buf_alloc *buf_alloc)
1906 struct hclge_priv_buf *priv;
1910 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1911 priv = &buf_alloc->priv_buf[i];
1912 if (hdev->hw_tc_map & BIT(i) &&
1913 !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1921 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1923 struct hclge_priv_buf *priv;
1927 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1928 priv = &buf_alloc->priv_buf[i];
1930 rx_priv += priv->buf_size;
1935 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1937 u32 i, total_tx_size = 0;
1939 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1940 total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
1942 return total_tx_size;
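/* hclge_is_rx_buf_ok: check whether the rx buffer left over after private
* allocation can hold the required shared buffer, and derive the shared
* buffer watermarks if it can. Illustrative sketch of the DCB case: with
* mps = 1500, aligned_mps = roundup(1500, 256) = 1536, the minimum shared
* buffer is HCLGE_BUF_MUL_BY * 1536 plus the dv buffer size, and the
* per-TC demand (tc_num + 1) * 1536 is checked as well.
*/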
1945 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
1946 struct hclge_pkt_buf_alloc *buf_alloc,
1949 u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
1950 u32 tc_num = hclge_get_tc_num(hdev);
1951 u32 shared_buf, aligned_mps;
1955 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
1957 if (hnae3_dev_dcb_supported(hdev))
1958 shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
1961 shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
1962 + hdev->dv_buf_size;
1964 shared_buf_tc = tc_num * aligned_mps + aligned_mps;
1965 shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
1966 HCLGE_BUF_SIZE_UNIT);
1968 rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
1969 if (rx_all < rx_priv + shared_std)
1972 shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
1973 buf_alloc->s_buf.buf_size = shared_buf;
1974 if (hnae3_dev_dcb_supported(hdev)) {
1975 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
1976 buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
1977 - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
1978 HCLGE_BUF_SIZE_UNIT);
1980 buf_alloc->s_buf.self.high = aligned_mps +
1981 HCLGE_NON_DCB_ADDITIONAL_BUF;
1982 buf_alloc->s_buf.self.low = aligned_mps;
1985 if (hnae3_dev_dcb_supported(hdev)) {
1986 hi_thrd = shared_buf - hdev->dv_buf_size;
1988 if (tc_num <= NEED_RESERVE_TC_NUM)
1989 hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
1993 hi_thrd = hi_thrd / tc_num;
1995 hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
1996 hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
1997 lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
1999 hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2000 lo_thrd = aligned_mps;
2003 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2004 buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2005 buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2011 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2012 struct hclge_pkt_buf_alloc *buf_alloc)
2016 total_size = hdev->pkt_buf_size;
2018 /* alloc tx buffer for all enabled tc */
2019 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2020 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2022 if (hdev->hw_tc_map & BIT(i)) {
2023 if (total_size < hdev->tx_buf_size)
2026 priv->tx_buf_size = hdev->tx_buf_size;
2028 priv->tx_buf_size = 0;
2031 total_size -= priv->tx_buf_size;
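/* hclge_rx_buf_calc_all: assign an rx private buffer and watermarks to
* every enabled TC. @max selects the generous watermark scheme; callers
* try max = true first and retry with max = false when the packet buffer
* cannot satisfy the shared buffer check.
*/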
2037 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2038 struct hclge_pkt_buf_alloc *buf_alloc)
2040 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2041 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2044 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2045 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2052 if (!(hdev->hw_tc_map & BIT(i)))
2057 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2058 priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2059 priv->wl.high = roundup(priv->wl.low + aligned_mps,
2060 HCLGE_BUF_SIZE_UNIT);
2063 priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2067 priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2070 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2073 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2074 struct hclge_pkt_buf_alloc *buf_alloc)
2076 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2077 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
/* clear from the last TC backwards */
2081 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2082 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2083 unsigned int mask = BIT((unsigned int)i);
2085 if (hdev->hw_tc_map & mask &&
2086 !(hdev->tm_info.hw_pfc_map & mask)) {
/* Clear the private buffer of this non-PFC TC */
2095 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2096 no_pfc_priv_num == 0)
2100 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2103 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2104 struct hclge_pkt_buf_alloc *buf_alloc)
2106 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2107 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
/* clear from the last TC backwards */
2111 for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2112 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2113 unsigned int mask = BIT((unsigned int)i);
2115 if (hdev->hw_tc_map & mask &&
2116 hdev->tm_info.hw_pfc_map & mask) {
/* Reduce the number of PFC-enabled TCs with a private buffer */
2125 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2130 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2133 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2134 struct hclge_pkt_buf_alloc *buf_alloc)
2136 #define COMPENSATE_BUFFER 0x3C00
2137 #define COMPENSATE_HALF_MPS_NUM 5
2138 #define PRIV_WL_GAP 0x1800
2140 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2141 u32 tc_num = hclge_get_tc_num(hdev);
2142 u32 half_mps = hdev->mps >> 1;
2147 rx_priv = rx_priv / tc_num;
2149 if (tc_num <= NEED_RESERVE_TC_NUM)
2150 rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2152 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2153 COMPENSATE_HALF_MPS_NUM * half_mps;
2154 min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
2155 rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2156 if (rx_priv < min_rx_priv)
2159 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2160 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2167 if (!(hdev->hw_tc_map & BIT(i)))
2171 priv->buf_size = rx_priv;
2172 priv->wl.high = rx_priv - hdev->dv_buf_size;
2173 priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2176 buf_alloc->s_buf.buf_size = 0;
2181 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2182 * @hdev: pointer to struct hclge_dev
2183 * @buf_alloc: pointer to buffer calculation data
* @return: 0 if the calculation succeeds, negative on failure
*/
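/* The allocation strategies below are tried in order until one passes
* hclge_is_rx_buf_ok(): private-buffer-only allocation, full watermarks,
* reduced watermarks, then dropping the private buffers of non-PFC TCs
* and finally those of PFC TCs.
*/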
2186 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2187 struct hclge_pkt_buf_alloc *buf_alloc)
2189 /* When DCB is not supported, rx private buffer is not allocated. */
2190 if (!hnae3_dev_dcb_supported(hdev)) {
2191 u32 rx_all = hdev->pkt_buf_size;
2193 rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2194 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2200 if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2203 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2206 /* try to decrease the buffer size */
2207 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2210 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2213 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2219 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2220 struct hclge_pkt_buf_alloc *buf_alloc)
2222 struct hclge_rx_priv_buff_cmd *req;
2223 struct hclge_desc desc;
2227 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2228 req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2230 /* Alloc private buffer TCs */
2231 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2232 struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2235 cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2237 cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2241 cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2242 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2244 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2246 dev_err(&hdev->pdev->dev,
2247 "rx private buffer alloc cmd failed %d\n", ret);
2252 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2253 struct hclge_pkt_buf_alloc *buf_alloc)
2255 struct hclge_rx_priv_wl_buf *req;
2256 struct hclge_priv_buf *priv;
2257 struct hclge_desc desc[2];
2261 for (i = 0; i < 2; i++) {
2262 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2264 req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
/* The first descriptor sets the NEXT bit to 1 */
2268 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2270 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2272 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2273 u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2275 priv = &buf_alloc->priv_buf[idx];
2276 req->tc_wl[j].high =
2277 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2278 req->tc_wl[j].high |=
2279 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2281 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2282 req->tc_wl[j].low |=
2283 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
/* Send 2 descriptors at one time */
2288 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2290 dev_err(&hdev->pdev->dev,
2291 "rx private waterline config cmd failed %d\n",
2296 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2297 struct hclge_pkt_buf_alloc *buf_alloc)
2299 struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2300 struct hclge_rx_com_thrd *req;
2301 struct hclge_desc desc[2];
2302 struct hclge_tc_thrd *tc;
2306 for (i = 0; i < 2; i++) {
2307 hclge_cmd_setup_basic_desc(&desc[i],
2308 HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2309 req = (struct hclge_rx_com_thrd *)&desc[i].data;
/* The first descriptor sets the NEXT bit to 1 */
2313 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2315 desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2317 for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2318 tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2320 req->com_thrd[j].high =
2321 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2322 req->com_thrd[j].high |=
2323 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2324 req->com_thrd[j].low =
2325 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2326 req->com_thrd[j].low |=
2327 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2331 /* Send 2 descriptors at one time */
2332 ret = hclge_cmd_send(&hdev->hw, desc, 2);
2334 dev_err(&hdev->pdev->dev,
2335 "common threshold config cmd failed %d\n", ret);
2339 static int hclge_common_wl_config(struct hclge_dev *hdev,
2340 struct hclge_pkt_buf_alloc *buf_alloc)
2342 struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2343 struct hclge_rx_com_wl *req;
2344 struct hclge_desc desc;
2347 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2349 req = (struct hclge_rx_com_wl *)desc.data;
2350 req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2351 req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2353 req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2354 req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2356 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2358 dev_err(&hdev->pdev->dev,
2359 "common waterline config cmd failed %d\n", ret);
2364 int hclge_buffer_alloc(struct hclge_dev *hdev)
2366 struct hclge_pkt_buf_alloc *pkt_buf;
2369 pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2373 ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2375 dev_err(&hdev->pdev->dev,
2376 "could not calc tx buffer size for all TCs %d\n", ret);
2380 ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2382 dev_err(&hdev->pdev->dev,
2383 "could not alloc tx buffers %d\n", ret);
2387 ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2389 dev_err(&hdev->pdev->dev,
2390 "could not calc rx priv buffer size for all TCs %d\n",
2395 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2397 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2402 if (hnae3_dev_dcb_supported(hdev)) {
2403 ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2405 dev_err(&hdev->pdev->dev,
2406 "could not configure rx private waterline %d\n",
2411 ret = hclge_common_thrd_config(hdev, pkt_buf);
2413 dev_err(&hdev->pdev->dev,
2414 "could not configure common threshold %d\n",
2420 ret = hclge_common_wl_config(hdev, pkt_buf);
2422 dev_err(&hdev->pdev->dev,
2423 "could not configure common waterline %d\n", ret);
2430 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2432 struct hnae3_handle *roce = &vport->roce;
2433 struct hnae3_handle *nic = &vport->nic;
2434 struct hclge_dev *hdev = vport->back;
2436 roce->rinfo.num_vectors = vport->back->num_roce_msi;
2438 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2441 roce->rinfo.base_vector = hdev->num_nic_msi;
2443 roce->rinfo.netdev = nic->kinfo.netdev;
2444 roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2445 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2447 roce->pdev = nic->pdev;
2448 roce->ae_algo = nic->ae_algo;
2449 roce->numa_node_mask = nic->numa_node_mask;
2454 static int hclge_init_msi(struct hclge_dev *hdev)
2456 struct pci_dev *pdev = hdev->pdev;
2460 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2462 PCI_IRQ_MSI | PCI_IRQ_MSIX);
2465 "failed(%d) to allocate MSI/MSI-X vectors\n",
2469 if (vectors < hdev->num_msi)
2470 dev_warn(&hdev->pdev->dev,
2471 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2472 hdev->num_msi, vectors);
2474 hdev->num_msi = vectors;
2475 hdev->num_msi_left = vectors;
2477 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2478 sizeof(u16), GFP_KERNEL);
2479 if (!hdev->vector_status) {
2480 pci_free_irq_vectors(pdev);
2484 for (i = 0; i < hdev->num_msi; i++)
2485 hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2487 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2488 sizeof(int), GFP_KERNEL);
2489 if (!hdev->vector_irq) {
2490 pci_free_irq_vectors(pdev);
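/* Only 10M and 100M links support half duplex; any other speed is forced
* to full duplex regardless of the requested setting.
*/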
2497 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2499 if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2500 duplex = HCLGE_MAC_FULL;
2505 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
2506 {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
2507 {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
2508 {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
2509 {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
2510 {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
2511 {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
2512 {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
2513 {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
2514 {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
2517 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
2521 for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
2522 if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
2523 *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
2531 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2532 u8 duplex, u8 lane_num)
2534 struct hclge_config_mac_speed_dup_cmd *req;
2535 struct hclge_desc desc;
2539 req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2541 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2544 hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2546 ret = hclge_convert_to_fw_speed(speed, &speed_fw);
2548 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2552 hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
2554 hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2556 req->lane_num = lane_num;
2558 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2560 dev_err(&hdev->pdev->dev,
2561 "mac speed/duplex config cmd failed %d.\n", ret);
2568 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
2570 struct hclge_mac *mac = &hdev->hw.mac;
2573 duplex = hclge_check_speed_dup(duplex, speed);
2574 if (!mac->support_autoneg && mac->speed == speed &&
2575 mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
2578 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
2582 hdev->hw.mac.speed = speed;
2583 hdev->hw.mac.duplex = duplex;
2585 hdev->hw.mac.lane_num = lane_num;
2590 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2591 u8 duplex, u8 lane_num)
2593 struct hclge_vport *vport = hclge_get_vport(handle);
2594 struct hclge_dev *hdev = vport->back;
2596 return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
2599 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2601 struct hclge_config_auto_neg_cmd *req;
2602 struct hclge_desc desc;
2606 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2608 req = (struct hclge_config_auto_neg_cmd *)desc.data;
2610 hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2611 req->cfg_an_cmd_flag = cpu_to_le32(flag);
2613 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2615 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2621 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2623 struct hclge_vport *vport = hclge_get_vport(handle);
2624 struct hclge_dev *hdev = vport->back;
2626 if (!hdev->hw.mac.support_autoneg) {
2628 dev_err(&hdev->pdev->dev,
2629 "autoneg is not supported by current port\n");
2636 return hclge_set_autoneg_en(hdev, enable);
2639 static int hclge_get_autoneg(struct hnae3_handle *handle)
2641 struct hclge_vport *vport = hclge_get_vport(handle);
2642 struct hclge_dev *hdev = vport->back;
2643 struct phy_device *phydev = hdev->hw.mac.phydev;
2646 return phydev->autoneg;
2648 return hdev->hw.mac.autoneg;
2651 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2653 struct hclge_vport *vport = hclge_get_vport(handle);
2654 struct hclge_dev *hdev = vport->back;
2657 dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2659 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2662 return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2665 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2667 struct hclge_vport *vport = hclge_get_vport(handle);
2668 struct hclge_dev *hdev = vport->back;
2670 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2671 return hclge_set_autoneg_en(hdev, !halt);
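/* Per-lane FEC counters are packed contiguously in the descriptor data
* words; the parser below walks the words and hops to the next descriptor
* whenever the current one is exhausted.
*/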
2676 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
2677 struct hclge_desc *desc, u32 desc_len)
2679 u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
2684 for (i = 0; i < lane_size; i++) {
2685 if (data_index >= HCLGE_DESC_DATA_LEN) {
2690 if (desc_index >= desc_len)
2693 hdev->fec_stats.per_lanes[i] +=
2694 le32_to_cpu(desc[desc_index].data[data_index]);
2699 static void hclge_parse_fec_stats(struct hclge_dev *hdev,
2700 struct hclge_desc *desc, u32 desc_len)
2702 struct hclge_query_fec_stats_cmd *req;
2704 req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
2706 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
2707 hdev->fec_stats.rs_corr_blocks +=
2708 le32_to_cpu(req->rs_fec_corr_blocks);
2709 hdev->fec_stats.rs_uncorr_blocks +=
2710 le32_to_cpu(req->rs_fec_uncorr_blocks);
2711 hdev->fec_stats.rs_error_blocks +=
2712 le32_to_cpu(req->rs_fec_error_blocks);
2713 hdev->fec_stats.base_r_corr_blocks +=
2714 le32_to_cpu(req->base_r_fec_corr_blocks);
2715 hdev->fec_stats.base_r_uncorr_blocks +=
2716 le32_to_cpu(req->base_r_fec_uncorr_blocks);
2718 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
2721 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
2723 struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
2727 for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
2728 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
2730 if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
2731 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2734 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
2738 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
2743 static void hclge_update_fec_stats(struct hclge_dev *hdev)
2745 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
2748 if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
2749 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
2752 ret = hclge_update_fec_stats_hw(hdev);
2754 dev_err(&hdev->pdev->dev,
2755 "failed to update fec stats, ret = %d\n", ret);
2757 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
2760 static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
2761 struct ethtool_fec_stats *fec_stats)
2763 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
2764 fec_stats->uncorrectable_blocks.total =
2765 hdev->fec_stats.rs_uncorr_blocks;
2768 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
2769 struct ethtool_fec_stats *fec_stats)
2773 if (hdev->fec_stats.base_r_lane_num == 0 ||
2774 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
2775 dev_err(&hdev->pdev->dev,
2776 "fec stats lane number(%llu) is invalid\n",
2777 hdev->fec_stats.base_r_lane_num);
2781 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
2782 fec_stats->corrected_blocks.lanes[i] =
2783 hdev->fec_stats.base_r_corr_per_lanes[i];
2784 fec_stats->uncorrectable_blocks.lanes[i] =
2785 hdev->fec_stats.base_r_uncorr_per_lanes[i];
2789 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
2790 struct ethtool_fec_stats *fec_stats)
2792 u32 fec_mode = hdev->hw.mac.fec_mode;
2795 case BIT(HNAE3_FEC_RS):
2796 case BIT(HNAE3_FEC_LLRS):
2797 hclge_get_fec_stats_total(hdev, fec_stats);
2799 case BIT(HNAE3_FEC_BASER):
2800 hclge_get_fec_stats_lanes(hdev, fec_stats);
2803 dev_err(&hdev->pdev->dev,
2804 "fec stats is not supported by current fec mode(0x%x)\n",
2810 static void hclge_get_fec_stats(struct hnae3_handle *handle,
2811 struct ethtool_fec_stats *fec_stats)
2813 struct hclge_vport *vport = hclge_get_vport(handle);
2814 struct hclge_dev *hdev = vport->back;
2815 u32 fec_mode = hdev->hw.mac.fec_mode;
2817 if (fec_mode == BIT(HNAE3_FEC_NONE) ||
2818 fec_mode == BIT(HNAE3_FEC_AUTO) ||
2819 fec_mode == BIT(HNAE3_FEC_USER_DEF))
2822 hclge_update_fec_stats(hdev);
2824 hclge_comm_get_fec_stats(hdev, fec_stats);
2827 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2829 struct hclge_config_fec_cmd *req;
2830 struct hclge_desc desc;
2833 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2835 req = (struct hclge_config_fec_cmd *)desc.data;
2836 if (fec_mode & BIT(HNAE3_FEC_AUTO))
2837 hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2838 if (fec_mode & BIT(HNAE3_FEC_RS))
2839 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2840 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2841 if (fec_mode & BIT(HNAE3_FEC_LLRS))
2842 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2843 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
2844 if (fec_mode & BIT(HNAE3_FEC_BASER))
2845 hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2846 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2848 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2850 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2855 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2857 struct hclge_vport *vport = hclge_get_vport(handle);
2858 struct hclge_dev *hdev = vport->back;
2859 struct hclge_mac *mac = &hdev->hw.mac;
2862 if (fec_mode && !(mac->fec_ability & fec_mode)) {
2863 dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2867 ret = hclge_set_fec_hw(hdev, fec_mode);
2871 mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2875 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2878 struct hclge_vport *vport = hclge_get_vport(handle);
2879 struct hclge_dev *hdev = vport->back;
2880 struct hclge_mac *mac = &hdev->hw.mac;
2883 *fec_ability = mac->fec_ability;
2885 *fec_mode = mac->fec_mode;
2888 static int hclge_mac_init(struct hclge_dev *hdev)
2890 struct hclge_mac *mac = &hdev->hw.mac;
2893 hdev->support_sfp_query = true;
2894 hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2895 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2896 hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
2900 if (hdev->hw.mac.support_autoneg) {
2901 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2908 if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2909 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2914 ret = hclge_set_mac_mtu(hdev, hdev->mps);
2916 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2920 ret = hclge_set_default_loopback(hdev);
2924 ret = hclge_buffer_alloc(hdev);
2926 dev_err(&hdev->pdev->dev,
2927 "allocate buffer fail, ret=%d\n", ret);
2932 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2934 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2935 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
2936 hdev->last_mbx_scheduled = jiffies;
2937 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2941 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2943 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2944 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
2945 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
2946 hdev->last_rst_scheduled = jiffies;
2947 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2951 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2953 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2954 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2955 mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2958 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2960 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2961 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2962 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2965 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2967 struct hclge_link_status_cmd *req;
2968 struct hclge_desc desc;
2971 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2972 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2974 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2979 req = (struct hclge_link_status_cmd *)desc.data;
2980 *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2981 HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2986 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2988 struct phy_device *phydev = hdev->hw.mac.phydev;
2990 *link_status = HCLGE_LINK_STATUS_DOWN;
2992 if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2995 if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2998 return hclge_get_mac_link_status(hdev, link_status);
3001 static void hclge_push_link_status(struct hclge_dev *hdev)
3003 struct hclge_vport *vport;
3007 for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
3008 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
3010 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
3011 vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
3014 ret = hclge_push_vf_link_status(vport);
3016 dev_err(&hdev->pdev->dev,
3017 "failed to push link status to vf%u, ret = %d\n",
3023 static void hclge_update_link_status(struct hclge_dev *hdev)
3025 struct hnae3_handle *rhandle = &hdev->vport[0].roce;
3026 struct hnae3_handle *handle = &hdev->vport[0].nic;
3027 struct hnae3_client *rclient = hdev->roce_client;
3028 struct hnae3_client *client = hdev->nic_client;
3035 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
3038 ret = hclge_get_mac_phy_link(hdev, &state);
3040 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3044 if (state != hdev->hw.mac.link) {
3045 hdev->hw.mac.link = state;
3046 client->ops->link_status_change(handle, state);
3047 hclge_config_mac_tnl_int(hdev, state);
3048 if (rclient && rclient->ops->link_status_change)
3049 rclient->ops->link_status_change(rhandle, state);
3051 hclge_push_link_status(hdev);
3054 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
3057 static void hclge_update_speed_advertising(struct hclge_mac *mac)
3061 if (hclge_get_speed_bit(mac->speed, &speed_ability))
3064 switch (mac->module_type) {
3065 case HNAE3_MODULE_TYPE_FIBRE_LR:
3066 hclge_convert_setting_lr(speed_ability, mac->advertising);
3068 case HNAE3_MODULE_TYPE_FIBRE_SR:
3069 case HNAE3_MODULE_TYPE_AOC:
3070 hclge_convert_setting_sr(speed_ability, mac->advertising);
3072 case HNAE3_MODULE_TYPE_CR:
3073 hclge_convert_setting_cr(speed_ability, mac->advertising);
3075 case HNAE3_MODULE_TYPE_KR:
3076 hclge_convert_setting_kr(speed_ability, mac->advertising);
3083 static void hclge_update_fec_advertising(struct hclge_mac *mac)
3085 if (mac->fec_mode & BIT(HNAE3_FEC_RS))
3086 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
3088 else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
3089 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
3091 else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
3092 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
3095 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
3099 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3101 struct hclge_mac *mac = &hdev->hw.mac;
3104 switch (hdev->fc_mode_last_time) {
3105 case HCLGE_FC_RX_PAUSE:
3109 case HCLGE_FC_TX_PAUSE:
3123 linkmode_set_pause(mac->advertising, tx_en, rx_en);
3126 static void hclge_update_advertising(struct hclge_dev *hdev)
3128 struct hclge_mac *mac = &hdev->hw.mac;
3130 linkmode_zero(mac->advertising);
3131 hclge_update_speed_advertising(mac);
3132 hclge_update_fec_advertising(mac);
3133 hclge_update_pause_advertising(hdev);
3136 static void hclge_update_port_capability(struct hclge_dev *hdev,
3137 struct hclge_mac *mac)
3139 if (hnae3_dev_fec_supported(hdev))
3140 hclge_convert_setting_fec(mac);
/* the firmware cannot identify the backplane type; the media type
* read from the configuration helps to determine it
*/
3145 if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3146 mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3147 mac->module_type = HNAE3_MODULE_TYPE_KR;
3148 else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3149 mac->module_type = HNAE3_MODULE_TYPE_TP;
3151 if (mac->support_autoneg) {
3152 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3153 linkmode_copy(mac->advertising, mac->supported);
3155 linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3157 hclge_update_advertising(hdev);
3161 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3163 struct hclge_sfp_info_cmd *resp;
3164 struct hclge_desc desc;
3167 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3168 resp = (struct hclge_sfp_info_cmd *)desc.data;
3169 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3170 if (ret == -EOPNOTSUPP) {
3171 dev_warn(&hdev->pdev->dev,
3172 "IMP do not support get SFP speed %d\n", ret);
3175 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3179 *speed = le32_to_cpu(resp->speed);
3184 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3186 struct hclge_sfp_info_cmd *resp;
3187 struct hclge_desc desc;
3190 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3191 resp = (struct hclge_sfp_info_cmd *)desc.data;
3193 resp->query_type = QUERY_ACTIVE_SPEED;
3195 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3196 if (ret == -EOPNOTSUPP) {
3197 dev_warn(&hdev->pdev->dev,
3198 "IMP does not support get SFP info %d\n", ret);
3201 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
/* In some cases, the MAC speed queried from the IMP may be 0; it must
* not be assigned to mac->speed.
*/
3208 if (!le32_to_cpu(resp->speed))
3211 mac->speed = le32_to_cpu(resp->speed);
/* if resp->speed_ability is 0, the firmware is an old version and
* these params should not be updated
*/
3215 if (resp->speed_ability) {
3216 mac->module_type = le32_to_cpu(resp->module_type);
3217 mac->speed_ability = le32_to_cpu(resp->speed_ability);
3218 mac->autoneg = resp->autoneg;
3219 mac->support_autoneg = resp->autoneg_ability;
3220 mac->speed_type = QUERY_ACTIVE_SPEED;
3221 mac->lane_num = resp->lane_num;
3222 if (!resp->active_fec)
3225 mac->fec_mode = BIT(resp->active_fec);
3226 mac->fec_ability = resp->fec_ability;
3228 mac->speed_type = QUERY_SFP_SPEED;
3234 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3235 struct ethtool_link_ksettings *cmd)
3237 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3238 struct hclge_vport *vport = hclge_get_vport(handle);
3239 struct hclge_phy_link_ksetting_0_cmd *req0;
3240 struct hclge_phy_link_ksetting_1_cmd *req1;
3241 u32 supported, advertising, lp_advertising;
3242 struct hclge_dev *hdev = vport->back;
3245 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3247 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3248 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3251 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3253 dev_err(&hdev->pdev->dev,
3254 "failed to get phy link ksetting, ret = %d.\n", ret);
3258 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3259 cmd->base.autoneg = req0->autoneg;
3260 cmd->base.speed = le32_to_cpu(req0->speed);
3261 cmd->base.duplex = req0->duplex;
3262 cmd->base.port = req0->port;
3263 cmd->base.transceiver = req0->transceiver;
3264 cmd->base.phy_address = req0->phy_address;
3265 cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3266 cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3267 supported = le32_to_cpu(req0->supported);
3268 advertising = le32_to_cpu(req0->advertising);
3269 lp_advertising = le32_to_cpu(req0->lp_advertising);
3270 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3272 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3274 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3277 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3278 cmd->base.master_slave_cfg = req1->master_slave_cfg;
3279 cmd->base.master_slave_state = req1->master_slave_state;
3285 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3286 const struct ethtool_link_ksettings *cmd)
3288 struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3289 struct hclge_vport *vport = hclge_get_vport(handle);
3290 struct hclge_phy_link_ksetting_0_cmd *req0;
3291 struct hclge_phy_link_ksetting_1_cmd *req1;
3292 struct hclge_dev *hdev = vport->back;
3296 if (cmd->base.autoneg == AUTONEG_DISABLE &&
3297 ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3298 (cmd->base.duplex != DUPLEX_HALF &&
3299 cmd->base.duplex != DUPLEX_FULL)))
3302 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3304 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3305 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3308 req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3309 req0->autoneg = cmd->base.autoneg;
3310 req0->speed = cpu_to_le32(cmd->base.speed);
3311 req0->duplex = cmd->base.duplex;
3312 ethtool_convert_link_mode_to_legacy_u32(&advertising,
3313 cmd->link_modes.advertising);
3314 req0->advertising = cpu_to_le32(advertising);
3315 req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3317 req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3318 req1->master_slave_cfg = cmd->base.master_slave_cfg;
3320 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3322 dev_err(&hdev->pdev->dev,
3323 "failed to set phy link ksettings, ret = %d.\n", ret);
3327 hdev->hw.mac.autoneg = cmd->base.autoneg;
3328 hdev->hw.mac.speed = cmd->base.speed;
3329 hdev->hw.mac.duplex = cmd->base.duplex;
3330 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3335 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3337 struct ethtool_link_ksettings cmd;
3340 if (!hnae3_dev_phy_imp_supported(hdev))
3343 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3347 hdev->hw.mac.autoneg = cmd.base.autoneg;
3348 hdev->hw.mac.speed = cmd.base.speed;
3349 hdev->hw.mac.duplex = cmd.base.duplex;
3350 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
3355 static int hclge_tp_port_init(struct hclge_dev *hdev)
3357 struct ethtool_link_ksettings cmd;
3359 if (!hnae3_dev_phy_imp_supported(hdev))
3362 cmd.base.autoneg = hdev->hw.mac.autoneg;
3363 cmd.base.speed = hdev->hw.mac.speed;
3364 cmd.base.duplex = hdev->hw.mac.duplex;
3365 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3367 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
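/* Port info is refreshed from different sources depending on the media
* type and device version: copper ports query the PHY via the IMP, V2 and
* newer devices query the full SFP info, and older devices can only query
* the SFP speed.
*/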
3370 static int hclge_update_port_info(struct hclge_dev *hdev)
3372 struct hclge_mac *mac = &hdev->hw.mac;
3376 /* get the port info from SFP cmd if not copper port */
3377 if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3378 return hclge_update_tp_port_info(hdev);
/* if the IMP does not support getting SFP/qSFP info, return directly */
3381 if (!hdev->support_sfp_query)
3384 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3386 ret = hclge_get_sfp_info(hdev, mac);
3388 speed = HCLGE_MAC_SPEED_UNKNOWN;
3389 ret = hclge_get_sfp_speed(hdev, &speed);
3392 if (ret == -EOPNOTSUPP) {
3393 hdev->support_sfp_query = false;
3399 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3400 if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3401 hclge_update_port_capability(hdev, mac);
3402 if (mac->speed != speed)
3403 (void)hclge_tm_port_shaper_cfg(hdev);
3406 return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3407 HCLGE_MAC_FULL, mac->lane_num);
3409 if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3410 return 0; /* do nothing if no SFP */
/* must configure full duplex for SFP */
3413 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
3417 static int hclge_get_status(struct hnae3_handle *handle)
3419 struct hclge_vport *vport = hclge_get_vport(handle);
3420 struct hclge_dev *hdev = vport->back;
3422 hclge_update_link_status(hdev);
3424 return hdev->hw.mac.link;
3427 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3429 if (!pci_num_vf(hdev->pdev)) {
3430 dev_err(&hdev->pdev->dev,
3431 "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3435 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3436 dev_err(&hdev->pdev->dev,
3437 "vf id(%d) is out of range(0 <= vfid < %d)\n",
3438 vf, pci_num_vf(hdev->pdev));
/* VFs start from index 1 in the vport array */
3443 vf += HCLGE_VF_VPORT_START_NUM;
3444 return &hdev->vport[vf];
3447 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3448 struct ifla_vf_info *ivf)
3450 struct hclge_vport *vport = hclge_get_vport(handle);
3451 struct hclge_dev *hdev = vport->back;
3453 vport = hclge_get_vf_vport(hdev, vf);
3458 ivf->linkstate = vport->vf_info.link_state;
3459 ivf->spoofchk = vport->vf_info.spoofchk;
3460 ivf->trusted = vport->vf_info.trusted;
3461 ivf->min_tx_rate = 0;
3462 ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3463 ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3464 ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3465 ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3466 ether_addr_copy(ivf->mac, vport->vf_info.mac);
3471 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3474 struct hclge_vport *vport = hclge_get_vport(handle);
3475 struct hclge_dev *hdev = vport->back;
3479 vport = hclge_get_vf_vport(hdev, vf);
3483 link_state_old = vport->vf_info.link_state;
3484 vport->vf_info.link_state = link_state;
/* Return success directly if the VF is not alive; the VF will
* query the link state itself when it starts working.
*/
3489 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3492 ret = hclge_push_vf_link_status(vport);
3494 vport->vf_info.link_state = link_state_old;
3495 dev_err(&hdev->pdev->dev,
3496 "failed to push vf%d link status, ret = %d\n", vf, ret);
3502 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3504 u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3506 /* fetch the events from their corresponding regs */
3507 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3508 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3509 hw_err_src_reg = hclge_read_dev(&hdev->hw,
3510 HCLGE_RAS_PF_OTHER_INT_STS_REG);
3512 /* Assumption: If by any chance reset and mailbox events are reported
3513 * together then we will only process reset event in this go and will
* defer the processing of the mailbox events. Since we would not have
* cleared the RX CMDQ event this time, we would receive another
* interrupt from the H/W just for the mailbox.
3518 * check for vector0 reset event sources
3520 if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3521 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3522 set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3523 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3524 *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3525 hdev->rst_stats.imp_rst_cnt++;
3526 return HCLGE_VECTOR0_EVENT_RST;
3529 if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3530 dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3531 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3532 set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3533 *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3534 hdev->rst_stats.global_rst_cnt++;
3535 return HCLGE_VECTOR0_EVENT_RST;
3538 /* check for vector0 msix event and hardware error event source */
3539 if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3540 hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3541 return HCLGE_VECTOR0_EVENT_ERR;
3543 /* check for vector0 ptp event source */
3544 if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3545 *clearval = msix_src_reg;
3546 return HCLGE_VECTOR0_EVENT_PTP;
3549 /* check for vector0 mailbox(=CMDQ RX) event source */
3550 if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3551 cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3552 *clearval = cmdq_src_reg;
3553 return HCLGE_VECTOR0_EVENT_MBX;
3556 /* print other vector0 event source */
3557 dev_info(&hdev->pdev->dev,
3558 "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3559 cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3561 return HCLGE_VECTOR0_EVENT_OTHER;
3564 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3567 #define HCLGE_IMP_RESET_DELAY 5
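/* Assumed rationale: after an IMP reset interrupt, give the IMP a short
* grace period (in ms) before writing the interrupt source register.
*/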
3569 switch (event_type) {
3570 case HCLGE_VECTOR0_EVENT_PTP:
3571 case HCLGE_VECTOR0_EVENT_RST:
3572 if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
3573 mdelay(HCLGE_IMP_RESET_DELAY);
3575 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3577 case HCLGE_VECTOR0_EVENT_MBX:
3578 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3585 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3587 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3588 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3589 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3590 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3591 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3594 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3596 writel(enable ? 1 : 0, vector->addr);
3599 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3601 struct hclge_dev *hdev = data;
3602 unsigned long flags;
3606 hclge_enable_vector(&hdev->misc_vector, false);
3607 event_cause = hclge_check_event_cause(hdev, &clearval);
/* vector 0 interrupt is shared among reset, mailbox, error and PTP event sources. */
3610 switch (event_cause) {
3611 case HCLGE_VECTOR0_EVENT_ERR:
3612 hclge_errhand_task_schedule(hdev);
3614 case HCLGE_VECTOR0_EVENT_RST:
3615 hclge_reset_task_schedule(hdev);
3617 case HCLGE_VECTOR0_EVENT_PTP:
3618 spin_lock_irqsave(&hdev->ptp->lock, flags);
3619 hclge_ptp_clean_tx_hwts(hdev);
3620 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3622 case HCLGE_VECTOR0_EVENT_MBX:
/* If we are here then either:
* 1. we are not handling any mbx task and none is scheduled, or
* 2. we are handling a mbx task but nothing more is scheduled.
* In both cases, we should schedule the mbx task as there are more
* mbx messages reported by this interrupt.
*/
3632 hclge_mbx_task_schedule(hdev);
3635 dev_warn(&hdev->pdev->dev,
3636 "received unknown or unhandled event of vector0\n");
3640 hclge_clear_event_cause(hdev, event_cause, clearval);
3642 /* Enable interrupt if it is not caused by reset event or error event */
3643 if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3644 event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3645 event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3646 hclge_enable_vector(&hdev->misc_vector, true);
3651 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3653 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3654 dev_warn(&hdev->pdev->dev,
3655 "vector(vector_id %d) has been freed.\n", vector_id);
3659 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3660 hdev->num_msi_left += 1;
3661 hdev->num_msi_used -= 1;
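/* MSI-X vector 0 is reserved as the misc vector; it carries the reset,
* mailbox, error and PTP events handled by hclge_misc_irq_handle().
*/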
3664 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3666 struct hclge_misc_vector *vector = &hdev->misc_vector;
3668 vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3670 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3671 hdev->vector_status[0] = 0;
3673 hdev->num_msi_left -= 1;
3674 hdev->num_msi_used += 1;
3677 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3681 hclge_get_misc_vector(hdev);
/* this irq is explicitly freed in hclge_misc_irq_uninit() */
3684 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3685 HCLGE_NAME, pci_name(hdev->pdev));
3686 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3687 0, hdev->misc_vector.name, hdev);
3689 hclge_free_vector(hdev, 0);
dev_err(&hdev->pdev->dev, "request misc irq(%d) failed\n",
3691 hdev->misc_vector.vector_irq);
3697 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3699 free_irq(hdev->misc_vector.vector_irq, hdev);
3700 hclge_free_vector(hdev, 0);
3703 int hclge_notify_client(struct hclge_dev *hdev,
3704 enum hnae3_reset_notify_type type)
3706 struct hnae3_handle *handle = &hdev->vport[0].nic;
3707 struct hnae3_client *client = hdev->nic_client;
3710 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3713 if (!client->ops->reset_notify)
3716 ret = client->ops->reset_notify(handle, type);
3718 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3724 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3725 enum hnae3_reset_notify_type type)
3727 struct hnae3_handle *handle = &hdev->vport[0].roce;
3728 struct hnae3_client *client = hdev->roce_client;
3731 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3734 if (!client->ops->reset_notify)
3737 ret = client->ops->reset_notify(handle, type);
3739 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3745 static int hclge_reset_wait(struct hclge_dev *hdev)
#define HCLGE_RESET_WAIT_MS 100
3748 #define HCLGE_RESET_WAIT_CNT 350
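/* total wait budget: 350 polls * 100 ms = 35 seconds */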
3750 u32 val, reg, reg_bit;
3753 switch (hdev->reset_type) {
3754 case HNAE3_IMP_RESET:
3755 reg = HCLGE_GLOBAL_RESET_REG;
3756 reg_bit = HCLGE_IMP_RESET_BIT;
3758 case HNAE3_GLOBAL_RESET:
3759 reg = HCLGE_GLOBAL_RESET_REG;
3760 reg_bit = HCLGE_GLOBAL_RESET_BIT;
3762 case HNAE3_FUNC_RESET:
3763 reg = HCLGE_FUN_RST_ING;
3764 reg_bit = HCLGE_FUN_RST_ING_B;
3767 dev_err(&hdev->pdev->dev,
3768 "Wait for unsupported reset type: %d\n",
3773 val = hclge_read_dev(&hdev->hw, reg);
3774 while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WAIT_MS);
3776 val = hclge_read_dev(&hdev->hw, reg);
3780 if (cnt >= HCLGE_RESET_WAIT_CNT) {
3781 dev_warn(&hdev->pdev->dev,
3782 "Wait for reset timeout: %d\n", hdev->reset_type);
3789 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3791 struct hclge_vf_rst_cmd *req;
3792 struct hclge_desc desc;
3794 req = (struct hclge_vf_rst_cmd *)desc.data;
3795 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3796 req->dest_vfid = func_id;
3801 return hclge_cmd_send(&hdev->hw, &desc, 1);
3804 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3808 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3809 struct hclge_vport *vport = &hdev->vport[i];
3812 /* Send cmd to set/clear VF's FUNC_RST_ING */
3813 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3815 dev_err(&hdev->pdev->dev,
3816 "set vf(%u) rst failed %d!\n",
3817 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3823 !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
3826 if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
3827 hdev->reset_type == HNAE3_FUNC_RESET) {
3828 set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
3829 &vport->need_notify);
3833 /* Inform VF to process the reset.
3834 * hclge_inform_reset_assert_to_vf may fail if VF
3835 * driver is not loaded.
3837 ret = hclge_inform_reset_assert_to_vf(vport);
3839 dev_warn(&hdev->pdev->dev,
3840 "inform reset to vf(%u) failed %d!\n",
3841 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3848 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3850 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3851 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
3852 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3855 if (time_is_before_jiffies(hdev->last_mbx_scheduled +
3856 HCLGE_MBX_SCHED_TIMEOUT))
3857 dev_warn(&hdev->pdev->dev,
3858 "mbx service task is scheduled after %ums on cpu%u!\n",
3859 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
3860 smp_processor_id());
3862 hclge_mbx_handler(hdev);
3864 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
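/* Before asserting a function reset, poll the firmware until every VF
* reports that it has stopped IO, or until the retry budget is exhausted;
* mailbox events are serviced meanwhile so VFs can bring their netdev
* down.
*/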
3867 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3869 struct hclge_pf_rst_sync_cmd *req;
3870 struct hclge_desc desc;
3874 req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3875 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
/* VFs need to bring down their netdev via mailbox during PF or FLR reset */
3879 hclge_mailbox_service_task(hdev);
3881 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
/* for compatibility with old firmware, wait
* 100 ms for the VF to stop IO
*/
3885 if (ret == -EOPNOTSUPP) {
3886 msleep(HCLGE_RESET_SYNC_TIME);
dev_warn(&hdev->pdev->dev, "sync with VF failed %d!\n",
3892 } else if (req->all_vf_ready) {
3895 msleep(HCLGE_PF_RESET_SYNC_TIME);
3896 hclge_comm_cmd_reuse_desc(&desc, true);
3897 } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3899 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3902 void hclge_report_hw_error(struct hclge_dev *hdev,
3903 enum hnae3_hw_error_type type)
3905 struct hnae3_client *client = hdev->nic_client;
3907 if (!client || !client->ops->process_hw_error ||
3908 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3911 client->ops->process_hw_error(&hdev->vport[0].nic, type);
3914 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3918 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3919 if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3920 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3921 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3922 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3925 if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3926 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3927 reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3928 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3932 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3934 struct hclge_desc desc;
3935 struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3938 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3939 hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3940 req->fun_reset_vfid = func_id;
3942 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3944 dev_err(&hdev->pdev->dev,
3945 "send function reset cmd fail, status =%d\n", ret);
3950 static void hclge_do_reset(struct hclge_dev *hdev)
3952 struct hnae3_handle *handle = &hdev->vport[0].nic;
3953 struct pci_dev *pdev = hdev->pdev;
3956 if (hclge_get_hw_reset_stat(handle)) {
dev_info(&pdev->dev, "hardware reset has not finished\n");
3958 dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3959 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3960 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3964 switch (hdev->reset_type) {
3965 case HNAE3_IMP_RESET:
3966 dev_info(&pdev->dev, "IMP reset requested\n");
3967 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3968 hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3969 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3971 case HNAE3_GLOBAL_RESET:
3972 dev_info(&pdev->dev, "global reset requested\n");
3973 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3974 hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3975 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3977 case HNAE3_FUNC_RESET:
3978 dev_info(&pdev->dev, "PF reset requested\n");
3979 /* schedule again to check later */
3980 set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3981 hclge_reset_task_schedule(hdev);
3984 dev_warn(&pdev->dev,
3985 "unsupported reset type: %d\n", hdev->reset_type);
3990 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3991 unsigned long *addr)
3993 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3994 struct hclge_dev *hdev = ae_dev->priv;
3996 /* return the highest priority reset level amongst all */
3997 if (test_bit(HNAE3_IMP_RESET, addr)) {
3998 rst_level = HNAE3_IMP_RESET;
3999 clear_bit(HNAE3_IMP_RESET, addr);
4000 clear_bit(HNAE3_GLOBAL_RESET, addr);
4001 clear_bit(HNAE3_FUNC_RESET, addr);
4002 } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
4003 rst_level = HNAE3_GLOBAL_RESET;
4004 clear_bit(HNAE3_GLOBAL_RESET, addr);
4005 clear_bit(HNAE3_FUNC_RESET, addr);
4006 } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
4007 rst_level = HNAE3_FUNC_RESET;
4008 clear_bit(HNAE3_FUNC_RESET, addr);
4009 } else if (test_bit(HNAE3_FLR_RESET, addr)) {
4010 rst_level = HNAE3_FLR_RESET;
4011 clear_bit(HNAE3_FLR_RESET, addr);
4014 if (hdev->reset_type != HNAE3_NONE_RESET &&
4015 rst_level < hdev->reset_type)
4016 return HNAE3_NONE_RESET;
4021 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
4025 switch (hdev->reset_type) {
4026 case HNAE3_IMP_RESET:
4027 clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
4029 case HNAE3_GLOBAL_RESET:
4030 clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
/* For revision 0x20, the reset interrupt source
* can only be cleared after the hardware reset is done
*/
4042 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4043 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
4046 hclge_enable_vector(&hdev->misc_vector, true);
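/* The reset handshake toggles the SW_RST_RDY bit in the CSQ depth
* register: set it to tell the firmware the driver has finished its
* preparatory work, and clear it again once re-initialization is done.
*/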
4049 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
4053 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
4055 reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
4057 reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
4059 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
4062 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
4066 ret = hclge_set_all_vf_rst(hdev, true);
4070 hclge_func_reset_sync_vf(hdev);
4075 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
4080 switch (hdev->reset_type) {
4081 case HNAE3_FUNC_RESET:
4082 ret = hclge_func_reset_notify_vf(hdev);
4086 ret = hclge_func_reset_cmd(hdev, 0);
4088 dev_err(&hdev->pdev->dev,
4089 "asserting function reset fail %d!\n", ret);
/* After performing a PF reset, it is not necessary to do any
* mailbox handling or send any command to firmware, because
* any mailbox handling or command to firmware is only valid
* after hclge_comm_cmd_init is called.
*/
4098 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
4099 hdev->rst_stats.pf_rst_cnt++;
4101 case HNAE3_FLR_RESET:
4102 ret = hclge_func_reset_notify_vf(hdev);
4106 case HNAE3_IMP_RESET:
4107 hclge_handle_imp_error(hdev);
4108 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
4109 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
4110 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
4116 /* inform hardware that preparatory work is done */
4117 msleep(HCLGE_RESET_SYNC_TIME);
4118 hclge_reset_handshake(hdev, true);
4119 dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4124 static void hclge_show_rst_info(struct hclge_dev *hdev)
4128 buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4132 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4134 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4139 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4141 #define MAX_RESET_FAIL_CNT 5
4143 if (hdev->reset_pending) {
4144 dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4145 hdev->reset_pending);
4147 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4148 HCLGE_RESET_INT_M) {
4149 dev_info(&hdev->pdev->dev,
4150 "reset failed because new reset interrupt\n");
4151 hclge_clear_reset_cause(hdev);
4153 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4154 hdev->rst_stats.reset_fail_cnt++;
4155 set_bit(hdev->reset_type, &hdev->reset_pending);
4156 dev_info(&hdev->pdev->dev,
4157 "re-schedule reset task(%u)\n",
4158 hdev->rst_stats.reset_fail_cnt);
4162 hclge_clear_reset_cause(hdev);
	/* recover the handshake status when reset fails */
4165 hclge_reset_handshake(hdev, true);
4167 dev_err(&hdev->pdev->dev, "Reset fail!\n");
4169 hclge_show_rst_info(hdev);
4171 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4176 static void hclge_update_reset_level(struct hclge_dev *hdev)
4178 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4179 enum hnae3_reset_type reset_level;
	/* reset request will not be set during reset, so clear
	 * pending reset request to avoid unnecessary reset
	 * caused by the same reason.
	 */
4185 hclge_get_reset_level(ae_dev, &hdev->reset_request);
	/* if default_reset_request has a higher level reset request,
	 * it should be handled as soon as possible, since some errors
	 * need this kind of reset to fix them.
	 */
4191 reset_level = hclge_get_reset_level(ae_dev,
4192 &hdev->default_reset_request);
4193 if (reset_level != HNAE3_NONE_RESET)
4194 set_bit(reset_level, &hdev->reset_request);
4197 static int hclge_set_rst_done(struct hclge_dev *hdev)
4199 struct hclge_pf_rst_done_cmd *req;
4200 struct hclge_desc desc;
4203 req = (struct hclge_pf_rst_done_cmd *)desc.data;
4204 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4205 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4207 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* To be compatible with the old firmware, which does not support
	 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
	 * return success
	 */
	if (ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev,
			 "current firmware does not support command(0x%x)!\n",
			 HCLGE_OPC_PF_RST_DONE);
		return 0;
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
			ret);
	}

	return ret;
}
4225 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4229 switch (hdev->reset_type) {
4230 case HNAE3_FUNC_RESET:
4231 case HNAE3_FLR_RESET:
4232 ret = hclge_set_all_vf_rst(hdev, false);
4234 case HNAE3_GLOBAL_RESET:
4235 case HNAE3_IMP_RESET:
4236 ret = hclge_set_rst_done(hdev);
4242 /* clear up the handshake status after re-initialize done */
4243 hclge_reset_handshake(hdev, false);
4248 static int hclge_reset_stack(struct hclge_dev *hdev)
4252 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4256 ret = hclge_reset_ae_dev(hdev->ae_dev);
4260 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4263 static int hclge_reset_prepare(struct hclge_dev *hdev)
4267 hdev->rst_stats.reset_cnt++;
4268 /* perform reset of the stack & ae device for a client */
4269 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4274 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4279 return hclge_reset_prepare_wait(hdev);
4282 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4286 hdev->rst_stats.hw_reset_done_cnt++;
4288 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4293 ret = hclge_reset_stack(hdev);
4298 hclge_clear_reset_cause(hdev);
4300 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
	/* ignore RoCE notify error if it fails HCLGE_RESET_MAX_FAIL_CNT - 1
	 * times
	 */
	if (ret &&
	    hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
		return ret;
4308 ret = hclge_reset_prepare_up(hdev);
4313 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4318 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4322 hdev->last_reset_time = jiffies;
4323 hdev->rst_stats.reset_fail_cnt = 0;
4324 hdev->rst_stats.reset_done_cnt++;
4325 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4327 hclge_update_reset_level(hdev);
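/* Top-level reset flow: prepare -> wait for hardware -> rebuild. Any
 * failure falls through to hclge_reset_err_handle(), which either
 * re-schedules the reset task for another attempt or gives up after the
 * maximum number of failed tries.
 */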
4332 static void hclge_reset(struct hclge_dev *hdev)
4334 if (hclge_reset_prepare(hdev))
4337 if (hclge_reset_wait(hdev))
4340 if (hclge_reset_rebuild(hdev))
4346 if (hclge_reset_err_handle(hdev))
4347 hclge_reset_task_schedule(hdev);
4350 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4352 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4353 struct hclge_dev *hdev = ae_dev->priv;
	/* We might end up getting called broadly because of the 2 cases below:
	 * 1. Recoverable error was conveyed through APEI and the only way to
	 *    bring normalcy is to reset.
	 * 2. A new reset request from the stack due to timeout
	 *
	 * Check whether this is a new reset request and we are not here just
	 * because the last reset attempt did not succeed and the watchdog hit
	 * us again. We will know this if the last reset request did not occur
	 * very recently (watchdog timer = 5*HZ, let us check after a
	 * sufficiently large time, say 4*5*HZ). In case of a new request we
	 * reset the "reset level" to PF reset. And if it is a repeat of the
	 * most recent reset request, then we want to throttle it, so we will
	 * not allow it again within 3*HZ.
	 */
4370 if (time_before(jiffies, (hdev->last_reset_time +
4371 HCLGE_RESET_INTERVAL))) {
4372 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4376 if (hdev->default_reset_request) {
4378 hclge_get_reset_level(ae_dev,
4379 &hdev->default_reset_request);
4380 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4381 hdev->reset_level = HNAE3_FUNC_RESET;
4384 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4387 /* request reset & schedule reset task */
4388 set_bit(hdev->reset_level, &hdev->reset_request);
4389 hclge_reset_task_schedule(hdev);
4391 if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4392 hdev->reset_level++;
4395 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4396 enum hnae3_reset_type rst_type)
4398 struct hclge_dev *hdev = ae_dev->priv;
4400 set_bit(rst_type, &hdev->default_reset_request);
4403 static void hclge_reset_timer(struct timer_list *t)
4405 struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
	/* if default_reset_request has no value, it means that this reset
	 * request has already been handled, so just return here
	 */
4410 if (!hdev->default_reset_request)
4413 dev_info(&hdev->pdev->dev,
4414 "triggering reset in reset timer\n");
4415 hclge_reset_event(hdev->pdev, NULL);
4418 static void hclge_reset_subtask(struct hclge_dev *hdev)
4420 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	/* check if there is any ongoing reset in the hardware. This status can
	 * be checked from reset_pending. If there is, then we need to wait for
	 * hardware to complete reset.
	 *    a. If we are able to figure out in reasonable time that hardware
	 *       has fully reset, then we can proceed with driver and client
	 *       reinit.
	 *    b. else, we can come back later to check this status so re-sched
	 *       now.
	 */
4431 hdev->last_reset_time = jiffies;
4432 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4433 if (hdev->reset_type != HNAE3_NONE_RESET)
4436 /* check if we got any *new* reset requests to be honored */
4437 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4438 if (hdev->reset_type != HNAE3_NONE_RESET)
4439 hclge_do_reset(hdev);
4441 hdev->reset_type = HNAE3_NONE_RESET;
4444 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4446 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4447 enum hnae3_reset_type reset_type;
4449 if (ae_dev->hw_err_reset_req) {
4450 reset_type = hclge_get_reset_level(ae_dev,
4451 &ae_dev->hw_err_reset_req);
4452 hclge_set_def_reset_request(ae_dev, reset_type);
4455 if (hdev->default_reset_request && ae_dev->ops->reset_event)
4456 ae_dev->ops->reset_event(hdev->pdev, NULL);
4458 /* enable interrupt after error handling complete */
4459 hclge_enable_vector(&hdev->misc_vector, true);
4462 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4464 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4466 ae_dev->hw_err_reset_req = 0;
4468 if (hclge_find_error_source(hdev)) {
4469 hclge_handle_error_info_log(ae_dev);
4470 hclge_handle_mac_tnl(hdev);
4473 hclge_handle_err_reset_request(hdev);
4476 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4478 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4479 struct device *dev = &hdev->pdev->dev;
4482 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4483 if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
		if (hclge_handle_hw_msix_error(hdev,
					       &hdev->default_reset_request))
			dev_info(dev, "received msix interrupt 0x%x\n",
				 msix_sts_reg);
	} else {
		hclge_handle_hw_ras_error(ae_dev);
	}
4492 hclge_handle_err_reset_request(hdev);
4495 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4497 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4500 if (hnae3_dev_ras_imp_supported(hdev))
4501 hclge_handle_err_recovery(hdev);
4503 hclge_misc_err_recovery(hdev);
4506 static void hclge_reset_service_task(struct hclge_dev *hdev)
4508 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4511 if (time_is_before_jiffies(hdev->last_rst_scheduled +
4512 HCLGE_RESET_SCHED_TIMEOUT))
4513 dev_warn(&hdev->pdev->dev,
4514 "reset service task is scheduled after %ums on cpu%u!\n",
4515 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
4516 smp_processor_id());
4518 down(&hdev->reset_sem);
4519 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4521 hclge_reset_subtask(hdev);
4523 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4524 up(&hdev->reset_sem);
4527 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4529 #define HCLGE_ALIVE_SECONDS_NORMAL 8
4531 unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
	/* start from vport 1, because vport 0 (the PF) is always alive */
4535 for (i = 1; i < hdev->num_alloc_vport; i++) {
4536 struct hclge_vport *vport = &hdev->vport[i];
4538 if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
4539 !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4541 if (time_after(jiffies, vport->last_active_jiffies +
4543 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4544 dev_warn(&hdev->pdev->dev,
4545 "VF %u heartbeat timeout\n",
4546 i - HCLGE_VF_VPORT_START_NUM);
4551 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4553 unsigned long delta = round_jiffies_relative(HZ);
4555 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
	/* Always handle the link updating to make sure link state is
	 * updated when it is triggered by mbx.
	 */
4561 hclge_update_link_status(hdev);
4562 hclge_sync_mac_table(hdev);
4563 hclge_sync_promisc_mode(hdev);
4564 hclge_sync_fd_table(hdev);
4566 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4567 delta = jiffies - hdev->last_serv_processed;
4569 if (delta < round_jiffies_relative(HZ)) {
4570 delta = round_jiffies_relative(HZ) - delta;
4575 hdev->serv_processed_cnt++;
4576 hclge_update_vport_alive(hdev);
4578 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4579 hdev->last_serv_processed = jiffies;
4583 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4584 hclge_update_stats_for_all(hdev);
4586 hclge_update_port_info(hdev);
4587 hclge_sync_vlan_filter(hdev);
4589 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4590 hclge_rfs_filter_expire(hdev);
4592 hdev->last_serv_processed = jiffies;
4595 hclge_task_schedule(hdev, delta);
4598 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4600 unsigned long flags;
4602 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4603 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4604 !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
	/* to prevent concurrency with the irq handler */
4608 spin_lock_irqsave(&hdev->ptp->lock, flags);
	/* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
	 * handler may handle it just before spin_lock_irqsave().
	 */
4613 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4614 hclge_ptp_clean_tx_hwts(hdev);
4616 spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4619 static void hclge_service_task(struct work_struct *work)
4621 struct hclge_dev *hdev =
4622 container_of(work, struct hclge_dev, service_task.work);
4624 hclge_errhand_service_task(hdev);
4625 hclge_reset_service_task(hdev);
4626 hclge_ptp_service_task(hdev);
4627 hclge_mailbox_service_task(hdev);
4628 hclge_periodic_service_task(hdev);
	/* Handle error recovery, reset and mbx again in case the periodic task
	 * delays the handling by calling hclge_task_schedule() in
	 * hclge_periodic_service_task().
	 */
4634 hclge_errhand_service_task(hdev);
4635 hclge_reset_service_task(hdev);
4636 hclge_mailbox_service_task(hdev);
4639 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4641 /* VF handle has no client */
4642 if (!handle->client)
4643 return container_of(handle, struct hclge_vport, nic);
4644 else if (handle->client->type == HNAE3_CLIENT_ROCE)
4645 return container_of(handle, struct hclge_vport, roce);
4647 return container_of(handle, struct hclge_vport, nic);
4650 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4651 struct hnae3_vector_info *vector_info)
4653 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 64
4655 vector_info->vector = pci_irq_vector(hdev->pdev, idx);
	/* need an extended offset to configure vectors >= 64 */
	if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_REG_BASE +
				(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
	else
		vector_info->io_addr = hdev->hw.hw.io_base +
				HCLGE_VECTOR_EXT_REG_BASE +
				(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET_H +
				(idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
				HCLGE_VECTOR_REG_OFFSET;
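	/* e.g. for idx 65 the extended branch yields io_base +
	 * HCLGE_VECTOR_EXT_REG_BASE + 1 * HCLGE_VECTOR_REG_OFFSET_H +
	 * 0 * HCLGE_VECTOR_REG_OFFSET, the first slot of the extended
	 * vector register region.
	 */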
4670 hdev->vector_status[idx] = hdev->vport[0].vport_id;
4671 hdev->vector_irq[idx] = vector_info->vector;
4674 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4675 struct hnae3_vector_info *vector_info)
4677 struct hclge_vport *vport = hclge_get_vport(handle);
4678 struct hnae3_vector_info *vector = vector_info;
4679 struct hclge_dev *hdev = vport->back;
4684 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4685 vector_num = min(hdev->num_msi_left, vector_num);
4687 for (j = 0; j < vector_num; j++) {
4688 while (++i < hdev->num_nic_msi) {
4689 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4690 hclge_get_vector_info(hdev, i, vector);
4698 hdev->num_msi_left -= alloc;
4699 hdev->num_msi_used += alloc;
4704 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4708 for (i = 0; i < hdev->num_msi; i++)
4709 if (vector == hdev->vector_irq[i])
4715 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4717 struct hclge_vport *vport = hclge_get_vport(handle);
4718 struct hclge_dev *hdev = vport->back;
4721 vector_id = hclge_get_vector_index(hdev, vector);
4722 if (vector_id < 0) {
4723 dev_err(&hdev->pdev->dev,
4724 "Get vector index fail. vector = %d\n", vector);
4728 hclge_free_vector(hdev, vector_id);
4733 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4736 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4737 struct hclge_vport *vport = hclge_get_vport(handle);
4738 struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
4740 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
4742 hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
4743 ae_dev->dev_specs.rss_ind_tbl_size);
4748 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4749 const u8 *key, const u8 hfunc)
4751 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4752 struct hclge_vport *vport = hclge_get_vport(handle);
4753 struct hclge_dev *hdev = vport->back;
4754 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
4757 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
4759 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4763 /* Update the shadow RSS table with user specified qids */
4764 for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4765 rss_cfg->rss_indirection_tbl[i] = indir[i];
4767 /* Update the hardware */
4768 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
4769 rss_cfg->rss_indirection_tbl);
4772 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4773 struct ethtool_rxnfc *nfc)
4775 struct hclge_vport *vport = hclge_get_vport(handle);
4776 struct hclge_dev *hdev = vport->back;
4779 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
4780 &hdev->rss_cfg, nfc);
4782 dev_err(&hdev->pdev->dev,
4783 "failed to set rss tuple, ret = %d.\n", ret);
4790 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4791 struct ethtool_rxnfc *nfc)
4793 struct hclge_vport *vport = hclge_get_vport(handle);
4799 ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
4801 if (ret || !tuple_sets)
4804 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
4809 static int hclge_get_tc_size(struct hnae3_handle *handle)
4811 struct hclge_vport *vport = hclge_get_vport(handle);
4812 struct hclge_dev *hdev = vport->back;
4814 return hdev->pf_rss_size_max;
4817 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4819 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4820 struct hclge_vport *vport = hdev->vport;
4821 u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4822 u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4823 u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4824 struct hnae3_tc_info *tc_info;
4829 tc_info = &vport->nic.kinfo.tc_info;
4830 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		rss_size = tc_info->tqp_count[i];
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;
		/* tc_size set to hardware is the log2 of the roundup power of
		 * two of rss_size; the actual queue size is limited by the
		 * indirection table.
		 */
		if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
		    rss_size == 0) {
			dev_err(&hdev->pdev->dev,
				"Configure rss tc size failed, invalid TC_SIZE = %u\n",
				rss_size);
			return -EINVAL;
		}
		roundup_size = roundup_pow_of_two(rss_size);
		roundup_size = ilog2(roundup_size);

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
4854 tc_offset[i] = tc_info->tqp_offset[i];
4857 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
4861 int hclge_rss_init_hw(struct hclge_dev *hdev)
4863 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
4864 u8 *key = hdev->rss_cfg.rss_hash_key;
4865 u8 hfunc = hdev->rss_cfg.rss_algo;
4868 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
4873 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
4877 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg);
4881 return hclge_init_rss_tc_mode(hdev);
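/* Bind a chain of rings to an interrupt vector. Each command descriptor
 * carries up to HCLGE_VECTOR_ELEMENTS_PER_CMD ring entries, so a long
 * ring chain is flushed to firmware in batches, re-initializing the
 * descriptor whenever it fills up.
 */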
4884 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4885 int vector_id, bool en,
4886 struct hnae3_ring_chain_node *ring_chain)
4888 struct hclge_dev *hdev = vport->back;
4889 struct hnae3_ring_chain_node *node;
4890 struct hclge_desc desc;
4891 struct hclge_ctrl_vector_chain_cmd *req =
4892 (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4893 enum hclge_comm_cmd_status status;
4894 enum hclge_opcode_type op;
4895 u16 tqp_type_and_id;
4898 op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4899 hclge_cmd_setup_basic_desc(&desc, op, false);
4900 req->int_vector_id_l = hnae3_get_field(vector_id,
4901 HCLGE_VECTOR_ID_L_M,
4902 HCLGE_VECTOR_ID_L_S);
4903 req->int_vector_id_h = hnae3_get_field(vector_id,
4904 HCLGE_VECTOR_ID_H_M,
4905 HCLGE_VECTOR_ID_H_S);
4908 for (node = ring_chain; node; node = node->next) {
4909 tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4910 hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4912 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4913 hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4914 HCLGE_TQP_ID_S, node->tqp_index);
4915 hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4917 hnae3_get_field(node->int_gl_idx,
4918 HNAE3_RING_GL_IDX_M,
4919 HNAE3_RING_GL_IDX_S));
4920 req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4921 if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4922 req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4923 req->vfid = vport->vport_id;
4925 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4927 dev_err(&hdev->pdev->dev,
4928 "Map TQP fail, status is %d.\n",
4934 hclge_cmd_setup_basic_desc(&desc,
4937 req->int_vector_id_l =
4938 hnae3_get_field(vector_id,
4939 HCLGE_VECTOR_ID_L_M,
4940 HCLGE_VECTOR_ID_L_S);
4941 req->int_vector_id_h =
4942 hnae3_get_field(vector_id,
4943 HCLGE_VECTOR_ID_H_M,
4944 HCLGE_VECTOR_ID_H_S);
4949 req->int_cause_num = i;
4950 req->vfid = vport->vport_id;
4951 status = hclge_cmd_send(&hdev->hw, &desc, 1);
4953 dev_err(&hdev->pdev->dev,
4954 "Map TQP fail, status is %d.\n", status);
4962 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4963 struct hnae3_ring_chain_node *ring_chain)
4965 struct hclge_vport *vport = hclge_get_vport(handle);
4966 struct hclge_dev *hdev = vport->back;
4969 vector_id = hclge_get_vector_index(hdev, vector);
4970 if (vector_id < 0) {
4971 dev_err(&hdev->pdev->dev,
4972 "failed to get vector index. vector=%d\n", vector);
4976 return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4979 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4980 struct hnae3_ring_chain_node *ring_chain)
4982 struct hclge_vport *vport = hclge_get_vport(handle);
4983 struct hclge_dev *hdev = vport->back;
4986 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4989 vector_id = hclge_get_vector_index(hdev, vector);
4990 if (vector_id < 0) {
4991 dev_err(&handle->pdev->dev,
4992 "Get vector index fail. ret =%d\n", vector_id);
4996 ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4998 dev_err(&handle->pdev->dev,
4999 "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
5005 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
5006 bool en_uc, bool en_mc, bool en_bc)
5008 struct hclge_vport *vport = &hdev->vport[vf_id];
5009 struct hnae3_handle *handle = &vport->nic;
5010 struct hclge_promisc_cfg_cmd *req;
5011 struct hclge_desc desc;
5012 bool uc_tx_en = en_uc;
5016 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
5018 req = (struct hclge_promisc_cfg_cmd *)desc.data;
5021 if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
5024 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
5025 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
5026 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
5027 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
5028 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
5029 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
5030 req->extend_promisc = promisc_cfg;
5032 /* to be compatible with DEVICE_VERSION_V1/2 */
5034 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
5035 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
5036 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
5037 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
5038 hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
5039 req->promisc = promisc_cfg;
5041 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5043 dev_err(&hdev->pdev->dev,
5044 "failed to set vport %u promisc mode, ret = %d.\n",
5050 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
5051 bool en_mc_pmc, bool en_bc_pmc)
5053 return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
5054 en_uc_pmc, en_mc_pmc, en_bc_pmc);
5057 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
5060 struct hclge_vport *vport = hclge_get_vport(handle);
5061 struct hclge_dev *hdev = vport->back;
5062 bool en_bc_pmc = true;
5064 /* For device whose version below V2, if broadcast promisc enabled,
5065 * vlan filter is always bypassed. So broadcast promisc should be
5066 * disabled until user enable promisc mode
5068 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
5069 en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
5071 return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
5075 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
5077 struct hclge_vport *vport = hclge_get_vport(handle);
5079 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
5082 static void hclge_sync_fd_state(struct hclge_dev *hdev)
5084 if (hlist_empty(&hdev->fd_rule_list))
5085 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
5088 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
5090 if (!test_bit(location, hdev->fd_bmap)) {
5091 set_bit(location, hdev->fd_bmap);
5092 hdev->hclge_fd_rule_num++;
5096 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
5098 if (test_bit(location, hdev->fd_bmap)) {
5099 clear_bit(location, hdev->fd_bmap);
5100 hdev->hclge_fd_rule_num--;
5104 static void hclge_fd_free_node(struct hclge_dev *hdev,
5105 struct hclge_fd_rule *rule)
5107 hlist_del(&rule->rule_node);
5109 hclge_sync_fd_state(hdev);
5112 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
5113 struct hclge_fd_rule *old_rule,
5114 struct hclge_fd_rule *new_rule,
5115 enum HCLGE_FD_NODE_STATE state)
5118 case HCLGE_FD_TO_ADD:
5119 case HCLGE_FD_ACTIVE:
		/* 1) if the new state is TO_ADD, just replace the old rule
		 * with the same location, no matter its state, because the
		 * new rule will be configured to the hardware.
		 * 2) if the new state is ACTIVE, it means the new rule
		 * has been configured to the hardware, so just replace
		 * the old rule node with the same location.
		 * 3) neither case adds a new node to the list, so it is
		 * unnecessary to update the rule number and fd_bmap.
		 */
5129 new_rule->rule_node.next = old_rule->rule_node.next;
5130 new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5131 memcpy(old_rule, new_rule, sizeof(*old_rule));
5134 case HCLGE_FD_DELETED:
5135 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5136 hclge_fd_free_node(hdev, old_rule);
5138 case HCLGE_FD_TO_DEL:
		/* if the new request is TO_DEL, and the old rule exists:
		 * 1) the state of the old rule is TO_DEL, we need to do
		 * nothing, because we delete the rule by location and the
		 * other rule content is unnecessary.
		 * 2) the state of the old rule is ACTIVE, we need to change
		 * its state to TO_DEL, so the rule will be deleted when the
		 * periodic task is scheduled.
		 * 3) the state of the old rule is TO_ADD, it means the rule
		 * hasn't been added to hardware, so we just delete the rule
		 * node from fd_rule_list directly.
		 */
5150 if (old_rule->state == HCLGE_FD_TO_ADD) {
5151 hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5152 hclge_fd_free_node(hdev, old_rule);
5155 old_rule->state = HCLGE_FD_TO_DEL;
5160 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5162 struct hclge_fd_rule **parent)
5164 struct hclge_fd_rule *rule;
5165 struct hlist_node *node;
5167 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
		if (rule->location == location)
			return rule;
		else if (rule->location > location)
			return NULL;

		/* record the parent node, used to keep the nodes in
		 * fd_rule_list in ascending order
		 */
		*parent = rule;
	}

	return NULL;
}
/* insert fd rule node in ascending order according to rule->location */
5182 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5183 struct hclge_fd_rule *rule,
5184 struct hclge_fd_rule *parent)
5186 INIT_HLIST_NODE(&rule->rule_node);
5189 hlist_add_behind(&rule->rule_node, &parent->rule_node);
5191 hlist_add_head(&rule->rule_node, hlist);
5194 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5195 struct hclge_fd_user_def_cfg *cfg)
5197 struct hclge_fd_user_def_cfg_cmd *req;
5198 struct hclge_desc desc;
5202 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5204 req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5206 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5207 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5208 HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5209 req->ol2_cfg = cpu_to_le16(data);
5212 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5213 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5214 HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5215 req->ol3_cfg = cpu_to_le16(data);
5218 hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5219 hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5220 HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5221 req->ol4_cfg = cpu_to_le16(data);
5223 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5225 dev_err(&hdev->pdev->dev,
5226 "failed to set fd user def data, ret= %d\n", ret);
5230 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5234 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5238 spin_lock_bh(&hdev->fd_rule_lock);
5240 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5242 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5245 spin_unlock_bh(&hdev->fd_rule_lock);
5248 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5249 struct hclge_fd_rule *rule)
5251 struct hlist_head *hlist = &hdev->fd_rule_list;
5252 struct hclge_fd_rule *fd_rule, *parent = NULL;
5253 struct hclge_fd_user_def_info *info, *old_info;
5254 struct hclge_fd_user_def_cfg *cfg;
5256 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5257 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
	/* valid layer starts from 1, so minus 1 to get the cfg */
5261 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5262 info = &rule->ep.user_def;
5264 if (!cfg->ref_cnt || cfg->offset == info->offset)
5267 if (cfg->ref_cnt > 1)
5270 fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5272 old_info = &fd_rule->ep.user_def;
5273 if (info->layer == old_info->layer)
5278 dev_err(&hdev->pdev->dev,
5279 "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5284 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5285 struct hclge_fd_rule *rule)
5287 struct hclge_fd_user_def_cfg *cfg;
5289 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5290 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5293 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5294 if (!cfg->ref_cnt) {
5295 cfg->offset = rule->ep.user_def.offset;
5296 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5301 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5302 struct hclge_fd_rule *rule)
5304 struct hclge_fd_user_def_cfg *cfg;
5306 if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5307 rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5310 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5315 if (!cfg->ref_cnt) {
5317 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5321 static void hclge_update_fd_list(struct hclge_dev *hdev,
5322 enum HCLGE_FD_NODE_STATE state, u16 location,
5323 struct hclge_fd_rule *new_rule)
5325 struct hlist_head *hlist = &hdev->fd_rule_list;
5326 struct hclge_fd_rule *fd_rule, *parent = NULL;
5328 fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5330 hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5331 if (state == HCLGE_FD_ACTIVE)
5332 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5333 hclge_sync_fd_user_def_cfg(hdev, true);
5335 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
	/* it's unlikely to fail here, because we have checked that the rule
	 * exists before.
	 */
5342 if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to delete fd rule %u, it does not exist\n",
5349 hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5350 hclge_sync_fd_user_def_cfg(hdev, true);
5352 hclge_fd_insert_rule_node(hlist, new_rule, parent);
5353 hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5355 if (state == HCLGE_FD_TO_ADD) {
5356 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5357 hclge_task_schedule(hdev, 0);
5361 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5363 struct hclge_get_fd_mode_cmd *req;
5364 struct hclge_desc desc;
5367 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5369 req = (struct hclge_get_fd_mode_cmd *)desc.data;
5371 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5373 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5377 *fd_mode = req->mode;
5382 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5383 u32 *stage1_entry_num,
5384 u32 *stage2_entry_num,
5385 u16 *stage1_counter_num,
5386 u16 *stage2_counter_num)
5388 struct hclge_get_fd_allocation_cmd *req;
5389 struct hclge_desc desc;
5392 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5394 req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5396 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5398 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5403 *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5404 *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5405 *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5406 *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5411 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5412 enum HCLGE_FD_STAGE stage_num)
5414 struct hclge_set_fd_key_config_cmd *req;
5415 struct hclge_fd_key_cfg *stage;
5416 struct hclge_desc desc;
5419 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5421 req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5422 stage = &hdev->fd_cfg.key_cfg[stage_num];
5423 req->stage = stage_num;
5424 req->key_select = stage->key_sel;
5425 req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5426 req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5427 req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5428 req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5429 req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5430 req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5432 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5434 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5439 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5441 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5443 spin_lock_bh(&hdev->fd_rule_lock);
5444 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5445 spin_unlock_bh(&hdev->fd_rule_lock);
5447 hclge_fd_set_user_def_cmd(hdev, cfg);
5450 static int hclge_init_fd_config(struct hclge_dev *hdev)
5452 #define LOW_2_WORDS 0x03
5453 struct hclge_fd_key_cfg *key_cfg;
5456 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
5459 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
	switch (hdev->fd_cfg.fd_mode) {
	case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
		break;
	case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
		hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
		break;
	default:
		dev_err(&hdev->pdev->dev,
			"Unsupported flow director mode %u\n",
			hdev->fd_cfg.fd_mode);
		return -EOPNOTSUPP;
	}
5477 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5478 key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5479 key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5480 key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5481 key_cfg->outer_sipv6_word_en = 0;
5482 key_cfg->outer_dipv6_word_en = 0;
5484 key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5485 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5486 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5487 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
	/* If using the max 400bit key, we can support tuples for ether type */
5490 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5491 key_cfg->tuple_active |=
5492 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5493 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5494 key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
	/* roce_type is used to filter roce frames
	 * dst_vport is used to specify the rule
	 */
5500 key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5502 ret = hclge_get_fd_allocation(hdev,
5503 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5504 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5505 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5506 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5510 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
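/* A TCAM entry is written with three chained command descriptors: the
 * key bytes are split across the tcam_data fields of the three
 * descriptors in order, and the first descriptor also carries the
 * stage, x/y selector, index and valid bit.
 */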
5513 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5514 int loc, u8 *key, bool is_add)
5516 struct hclge_fd_tcam_config_1_cmd *req1;
5517 struct hclge_fd_tcam_config_2_cmd *req2;
5518 struct hclge_fd_tcam_config_3_cmd *req3;
5519 struct hclge_desc desc[3];
5522 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5523 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5524 hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5525 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5526 hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5528 req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5529 req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5530 req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5532 req1->stage = stage;
5533 req1->xy_sel = sel_x ? 1 : 0;
5534 hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5535 req1->index = cpu_to_le32(loc);
5536 req1->entry_vld = sel_x ? is_add : 0;
5539 memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5540 memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5541 sizeof(req2->tcam_data));
5542 memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5543 sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5546 ret = hclge_cmd_send(&hdev->hw, desc, 3);
5548 dev_err(&hdev->pdev->dev,
5549 "config tcam key fail, ret=%d\n",
5555 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5556 struct hclge_fd_ad_data *action)
5558 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5559 struct hclge_fd_ad_config_cmd *req;
5560 struct hclge_desc desc;
5564 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5566 req = (struct hclge_fd_ad_config_cmd *)desc.data;
5567 req->index = cpu_to_le32(loc);
5570 hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5571 action->write_rule_id_to_bd);
5572 hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5574 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5575 hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5576 action->override_tc);
5577 hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5578 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5581 hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5582 hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5583 action->forward_to_direct_queue);
5584 hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5586 hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5587 hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5588 HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5589 hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5590 hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5591 action->counter_id);
5593 req->ad_data = cpu_to_le64(ad_data);
5594 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5596 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
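/* Each tuple is converted into a TCAM x/y key pair computed by
 * calc_x()/calc_y() from the tuple value and its mask, so that bits
 * masked out by the user match any packet. Tuples flagged in
 * rule->unused_tuple are skipped entirely.
 */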
5601 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5602 struct hclge_fd_rule *rule)
5604 int offset, moffset, ip_offset;
5605 enum HCLGE_FD_KEY_OPT key_opt;
5606 u16 tmp_x_s, tmp_y_s;
5607 u32 tmp_x_l, tmp_y_l;
5611 if (rule->unused_tuple & BIT(tuple_bit))
5614 key_opt = tuple_key_info[tuple_bit].key_opt;
5615 offset = tuple_key_info[tuple_bit].offset;
5616 moffset = tuple_key_info[tuple_bit].moffset;
5620 calc_x(*key_x, p[offset], p[moffset]);
5621 calc_y(*key_y, p[offset], p[moffset]);
5625 calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5626 calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5627 *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5628 *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5632 calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5633 calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5634 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5635 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5639 for (i = 0; i < ETH_ALEN; i++) {
5640 calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5642 calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5648 ip_offset = IPV4_INDEX * sizeof(u32);
5649 calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5650 *(u32 *)(&p[moffset + ip_offset]));
5651 calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5652 *(u32 *)(&p[moffset + ip_offset]));
5653 *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5654 *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5662 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5663 u8 vf_id, u8 network_port_id)
5665 u32 port_number = 0;
5667 if (port_type == HOST_PORT) {
5668 hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5670 hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5672 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5674 hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5675 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5676 hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5682 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5683 __le32 *key_x, __le32 *key_y,
5684 struct hclge_fd_rule *rule)
5686 u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5687 u8 cur_pos = 0, tuple_size, shift_bits;
5690 for (i = 0; i < MAX_META_DATA; i++) {
5691 tuple_size = meta_data_key_info[i].key_length;
5692 tuple_bit = key_cfg->meta_data_active & BIT(i);
5694 switch (tuple_bit) {
5695 case BIT(ROCE_TYPE):
5696 hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5697 cur_pos += tuple_size;
5699 case BIT(DST_VPORT):
5700 port_number = hclge_get_port_number(HOST_PORT, 0,
5702 hnae3_set_field(meta_data,
5703 GENMASK(cur_pos + tuple_size, cur_pos),
5704 cur_pos, port_number);
5705 cur_pos += tuple_size;
5712 calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5713 calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5714 shift_bits = sizeof(meta_data) * 8 - cur_pos;
5716 *key_x = cpu_to_le32(tmp_x << shift_bits);
5717 *key_y = cpu_to_le32(tmp_y << shift_bits);
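	/* note: cur_pos bits of meta data were filled from bit 0 upwards;
	 * shifting left by (32 - cur_pos) left-aligns them so the meta
	 * data key ends up in the MSB region, matching the key layout
	 * described below.
	 */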
/* A complete key is combined with the meta data key and the tuple key.
 * The meta data key is stored at the MSB region, and the tuple key is
 * stored at the LSB region; unused bits will be filled with 0.
 */
5724 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5725 struct hclge_fd_rule *rule)
5727 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5728 u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5729 u8 *cur_key_x, *cur_key_y;
5730 u8 meta_data_region;
5735 memset(key_x, 0, sizeof(key_x));
5736 memset(key_y, 0, sizeof(key_y));
5740 for (i = 0; i < MAX_TUPLE; i++) {
5743 tuple_size = tuple_key_info[i].key_length / 8;
5744 if (!(key_cfg->tuple_active & BIT(i)))
5747 tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5750 cur_key_x += tuple_size;
5751 cur_key_y += tuple_size;
5755 meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5756 MAX_META_DATA_LENGTH / 8;
5758 hclge_fd_convert_meta_data(key_cfg,
5759 (__le32 *)(key_x + meta_data_region),
5760 (__le32 *)(key_y + meta_data_region),
5763 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5766 dev_err(&hdev->pdev->dev,
5767 "fd key_y config fail, loc=%u, ret=%d\n",
5768 rule->queue_id, ret);
5772 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5775 dev_err(&hdev->pdev->dev,
5776 "fd key_x config fail, loc=%u, ret=%d\n",
5777 rule->queue_id, ret);
5781 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5782 struct hclge_fd_rule *rule)
5784 struct hclge_vport *vport = hdev->vport;
5785 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5786 struct hclge_fd_ad_data ad_data;
5788 memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5789 ad_data.ad_id = rule->location;
5791 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5792 ad_data.drop_packet = true;
5793 } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5794 ad_data.override_tc = true;
5796 kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5798 ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5800 ad_data.forward_to_direct_queue = true;
5801 ad_data.queue_id = rule->queue_id;
5804 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
5805 ad_data.use_counter = true;
5806 ad_data.counter_id = rule->vf_id %
5807 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
5809 ad_data.use_counter = false;
5810 ad_data.counter_id = 0;
5813 ad_data.use_next_stage = false;
5814 ad_data.next_input_key = 0;
5816 ad_data.write_rule_id_to_bd = true;
5817 ad_data.rule_id = rule->location;
5819 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
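/* The hclge_fd_check_*_tuple() helpers below validate an ethtool flow
 * spec and record every field the spec leaves unspecified in
 * *unused_tuple, so that key generation can skip those tuples later.
 */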
5822 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5825 if (!spec || !unused_tuple)
5828 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5831 *unused_tuple |= BIT(INNER_SRC_IP);
5834 *unused_tuple |= BIT(INNER_DST_IP);
5837 *unused_tuple |= BIT(INNER_SRC_PORT);
5840 *unused_tuple |= BIT(INNER_DST_PORT);
5843 *unused_tuple |= BIT(INNER_IP_TOS);
5848 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5851 if (!spec || !unused_tuple)
5854 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5855 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5858 *unused_tuple |= BIT(INNER_SRC_IP);
5861 *unused_tuple |= BIT(INNER_DST_IP);
5864 *unused_tuple |= BIT(INNER_IP_TOS);
5867 *unused_tuple |= BIT(INNER_IP_PROTO);
5869 if (spec->l4_4_bytes)
5872 if (spec->ip_ver != ETH_RX_NFC_IP4)
5878 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5881 if (!spec || !unused_tuple)
5884 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
	/* check whether the src/dst ip address is used */
5887 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5888 *unused_tuple |= BIT(INNER_SRC_IP);
5890 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5891 *unused_tuple |= BIT(INNER_DST_IP);
5894 *unused_tuple |= BIT(INNER_SRC_PORT);
5897 *unused_tuple |= BIT(INNER_DST_PORT);
5900 *unused_tuple |= BIT(INNER_IP_TOS);
5905 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5908 if (!spec || !unused_tuple)
5911 *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5912 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
	/* check whether the src/dst ip address is used */
5915 if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5916 *unused_tuple |= BIT(INNER_SRC_IP);
5918 if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5919 *unused_tuple |= BIT(INNER_DST_IP);
5921 if (!spec->l4_proto)
5922 *unused_tuple |= BIT(INNER_IP_PROTO);
5925 *unused_tuple |= BIT(INNER_IP_TOS);
5927 if (spec->l4_4_bytes)
5933 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5935 if (!spec || !unused_tuple)
5938 *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5939 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5940 BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5942 if (is_zero_ether_addr(spec->h_source))
5943 *unused_tuple |= BIT(INNER_SRC_MAC);
5945 if (is_zero_ether_addr(spec->h_dest))
5946 *unused_tuple |= BIT(INNER_DST_MAC);
5949 *unused_tuple |= BIT(INNER_ETH_TYPE);
5954 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5955 struct ethtool_rx_flow_spec *fs,
5958 if (fs->flow_type & FLOW_EXT) {
5959 if (fs->h_ext.vlan_etype) {
5960 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5964 if (!fs->h_ext.vlan_tci)
5965 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5967 if (fs->m_ext.vlan_tci &&
5968 be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5969 dev_err(&hdev->pdev->dev,
5970 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5971 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5975 *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5978 if (fs->flow_type & FLOW_MAC_EXT) {
5979 if (hdev->fd_cfg.fd_mode !=
5980 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5981 dev_err(&hdev->pdev->dev,
5982 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5986 if (is_zero_ether_addr(fs->h_ext.h_dest))
5987 *unused_tuple |= BIT(INNER_DST_MAC);
5989 *unused_tuple &= ~BIT(INNER_DST_MAC);
5995 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
5996 struct hclge_fd_user_def_info *info)
5998 switch (flow_type) {
6000 info->layer = HCLGE_FD_USER_DEF_L2;
6001 *unused_tuple &= ~BIT(INNER_L2_RSV);
6004 case IPV6_USER_FLOW:
6005 info->layer = HCLGE_FD_USER_DEF_L3;
6006 *unused_tuple &= ~BIT(INNER_L3_RSV);
6012 info->layer = HCLGE_FD_USER_DEF_L4;
6013 *unused_tuple &= ~BIT(INNER_L4_RSV);
6022 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
6024 return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
6027 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
6028 struct ethtool_rx_flow_spec *fs,
6030 struct hclge_fd_user_def_info *info)
6032 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
6033 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6034 u16 data, offset, data_mask, offset_mask;
6037 info->layer = HCLGE_FD_USER_DEF_NONE;
6038 *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
6040 if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
	/* user-def data from ethtool is a 64 bit value: bit0~15 is used
	 * for data, and bit32~47 is used for offset.
	 */
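	/* e.g. an ethtool "user-def 0x40000ffff" request encodes offset 4
	 * (bits 32~47) and match data 0xffff (bits 0~15).
	 */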
6046 data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6047 data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
6048 offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6049 offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
6051 if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
6052 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
6056 if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
6057 dev_err(&hdev->pdev->dev,
6058 "user-def offset[%u] should be no more than %u\n",
6059 offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
6063 if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
6064 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
6068 ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
6070 dev_err(&hdev->pdev->dev,
6071 "unsupported flow type for user-def bytes, ret = %d\n",
6077 info->data_mask = data_mask;
6078 info->offset = offset;
6083 static int hclge_fd_check_spec(struct hclge_dev *hdev,
6084 struct ethtool_rx_flow_spec *fs,
6086 struct hclge_fd_user_def_info *info)
6091 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6092 dev_err(&hdev->pdev->dev,
6093 "failed to config fd rules, invalid rule location: %u, max is %u\n.",
6095 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
6099 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
6103 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6104 switch (flow_type) {
6108 ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
6112 ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
6118 ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6121 case IPV6_USER_FLOW:
6122 ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6126 if (hdev->fd_cfg.fd_mode !=
6127 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6128 dev_err(&hdev->pdev->dev,
6129 "ETHER_FLOW is not supported in current fd mode!\n");
6133 ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6137 dev_err(&hdev->pdev->dev,
6138 "unsupported protocol type, protocol type = %#x\n",
6144 dev_err(&hdev->pdev->dev,
6145 "failed to check flow union tuple, ret = %d\n",
6150 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6153 static void hclge_fd_get_tcpip4_tuple(struct ethtool_rx_flow_spec *fs,
6154 struct hclge_fd_rule *rule, u8 ip_proto)
6156 rule->tuples.src_ip[IPV4_INDEX] =
6157 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6158 rule->tuples_mask.src_ip[IPV4_INDEX] =
6159 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6161 rule->tuples.dst_ip[IPV4_INDEX] =
6162 be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6163 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6164 be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6166 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6167 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6169 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6170 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6172 rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6173 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6175 rule->tuples.ether_proto = ETH_P_IP;
6176 rule->tuples_mask.ether_proto = 0xFFFF;
6178 rule->tuples.ip_proto = ip_proto;
6179 rule->tuples_mask.ip_proto = 0xFF;
6182 static void hclge_fd_get_ip4_tuple(struct ethtool_rx_flow_spec *fs,
6183 struct hclge_fd_rule *rule)
6185 rule->tuples.src_ip[IPV4_INDEX] =
6186 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6187 rule->tuples_mask.src_ip[IPV4_INDEX] =
6188 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6190 rule->tuples.dst_ip[IPV4_INDEX] =
6191 be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6192 rule->tuples_mask.dst_ip[IPV4_INDEX] =
6193 be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6195 rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6196 rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6198 rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6199 rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6201 rule->tuples.ether_proto = ETH_P_IP;
6202 rule->tuples_mask.ether_proto = 0xFFFF;
6205 static void hclge_fd_get_tcpip6_tuple(struct ethtool_rx_flow_spec *fs,
6206 struct hclge_fd_rule *rule, u8 ip_proto)
6208 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6210 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6213 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6215 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6218 rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6219 rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6221 rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6222 rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6224 rule->tuples.ether_proto = ETH_P_IPV6;
6225 rule->tuples_mask.ether_proto = 0xFFFF;
6227 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6228 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6230 rule->tuples.ip_proto = ip_proto;
6231 rule->tuples_mask.ip_proto = 0xFF;
6234 static void hclge_fd_get_ip6_tuple(struct ethtool_rx_flow_spec *fs,
6235 struct hclge_fd_rule *rule)
6237 be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6239 be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6242 be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6244 be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6247 rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6248 rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6250 rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6251 rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6253 rule->tuples.ether_proto = ETH_P_IPV6;
6254 rule->tuples_mask.ether_proto = 0xFFFF;
6257 static void hclge_fd_get_ether_tuple(struct ethtool_rx_flow_spec *fs,
6258 struct hclge_fd_rule *rule)
6260 ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6261 ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6263 ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6264 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6266 rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6267 rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6270 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6271 struct hclge_fd_rule *rule)
6273 switch (info->layer) {
6274 case HCLGE_FD_USER_DEF_L2:
6275 rule->tuples.l2_user_def = info->data;
6276 rule->tuples_mask.l2_user_def = info->data_mask;
6278 case HCLGE_FD_USER_DEF_L3:
6279 rule->tuples.l3_user_def = info->data;
6280 rule->tuples_mask.l3_user_def = info->data_mask;
6282 case HCLGE_FD_USER_DEF_L4:
6283 rule->tuples.l4_user_def = (u32)info->data << 16;
6284 rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6290 rule->ep.user_def = *info;
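6291 /* Keep a copy of the raw user-def info so hclge_fd_get_user_def_info()
6292  * can reproduce it when the rule is read back through ethtool. */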
6293 static int hclge_fd_get_tuple(struct ethtool_rx_flow_spec *fs,
6294 struct hclge_fd_rule *rule,
6295 struct hclge_fd_user_def_info *info)
6297 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6299 switch (flow_type) {
6301 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_SCTP);
6304 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_TCP);
6307 hclge_fd_get_tcpip4_tuple(fs, rule, IPPROTO_UDP);
6310 hclge_fd_get_ip4_tuple(fs, rule);
6313 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_SCTP);
6316 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_TCP);
6319 hclge_fd_get_tcpip6_tuple(fs, rule, IPPROTO_UDP);
6321 case IPV6_USER_FLOW:
6322 hclge_fd_get_ip6_tuple(fs, rule);
6325 hclge_fd_get_ether_tuple(fs, rule);
6331 if (fs->flow_type & FLOW_EXT) {
6332 rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6333 rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6334 hclge_fd_get_user_def_tuple(info, rule);
6337 if (fs->flow_type & FLOW_MAC_EXT) {
6338 ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6339 ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
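6340 /* Illustrative example (interface name hypothetical): a command like
6341  *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 80 action 3
6342  * arrives here with flow_type == TCP_V4_FLOW; adding 'vlan' or
6343  * 'user-def' options sets FLOW_EXT, and 'dst-mac' sets FLOW_MAC_EXT.
6344  */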
6345 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6346 struct hclge_fd_rule *rule)
6350 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6354 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6357 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6358 struct hclge_fd_rule *rule)
6362 spin_lock_bh(&hdev->fd_rule_lock);
6364 if (hdev->fd_active_type != rule->rule_type &&
6365 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6366 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6367 dev_err(&hdev->pdev->dev,
6368 "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6369 rule->rule_type, hdev->fd_active_type);
6370 spin_unlock_bh(&hdev->fd_rule_lock);
6374 ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6378 ret = hclge_clear_arfs_rules(hdev);
6382 ret = hclge_fd_config_rule(hdev, rule);
6386 rule->state = HCLGE_FD_ACTIVE;
6387 hdev->fd_active_type = rule->rule_type;
6388 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6391 spin_unlock_bh(&hdev->fd_rule_lock);
6395 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6397 struct hclge_vport *vport = hclge_get_vport(handle);
6398 struct hclge_dev *hdev = vport->back;
6400 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6403 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6404 u16 *vport_id, u8 *action, u16 *queue_id)
6406 struct hclge_vport *vport = hdev->vport;
6408 if (ring_cookie == RX_CLS_FLOW_DISC) {
6409 *action = HCLGE_FD_ACTION_DROP_PACKET;
6411 u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6412 u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6415 /* To stay consistent with the user's configuration, subtract 1 when
6416  * printing 'vf', because the vf id from ethtool is offset by 1.
6418 if (vf > hdev->num_req_vfs) {
6419 dev_err(&hdev->pdev->dev,
6420 "Error: vf id (%u) should be less than %u\n",
6421 vf - 1U, hdev->num_req_vfs);
6425 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6426 tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6429 dev_err(&hdev->pdev->dev,
6430 "Error: queue id (%u) > max tqp num (%u)\n",
6435 *action = HCLGE_FD_ACTION_SELECT_QUEUE;
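6436 /* ring_cookie layout (ethtool ABI): the low 32 bits carry the queue
6437  * id and the byte above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF carries
6438  * 'vf id + 1', with 0 meaning the PF itself; the special value
6439  * RX_CLS_FLOW_DISC requests a drop action instead of a queue.
6440  */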
6442 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6443 struct ethtool_rxnfc *cmd)
6445 struct hclge_vport *vport = hclge_get_vport(handle);
6446 struct hclge_dev *hdev = vport->back;
6447 struct hclge_fd_user_def_info info;
6448 u16 dst_vport_id = 0, q_index = 0;
6449 struct ethtool_rx_flow_spec *fs;
6450 struct hclge_fd_rule *rule;
6455 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
6456 dev_err(&hdev->pdev->dev,
6457 "flow table director is not supported\n");
6462 dev_err(&hdev->pdev->dev,
6463 "please enable flow director first\n");
6467 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6469 ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6473 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6478 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6482 ret = hclge_fd_get_tuple(fs, rule, &info);
6488 rule->flow_type = fs->flow_type;
6489 rule->location = fs->location;
6490 rule->unused_tuple = unused;
6491 rule->vf_id = dst_vport_id;
6492 rule->queue_id = q_index;
6493 rule->action = action;
6494 rule->rule_type = HCLGE_FD_EP_ACTIVE;
6496 ret = hclge_add_fd_entry_common(hdev, rule);
6503 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6504 struct ethtool_rxnfc *cmd)
6506 struct hclge_vport *vport = hclge_get_vport(handle);
6507 struct hclge_dev *hdev = vport->back;
6508 struct ethtool_rx_flow_spec *fs;
6511 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6514 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6516 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6519 spin_lock_bh(&hdev->fd_rule_lock);
6520 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6521 !test_bit(fs->location, hdev->fd_bmap)) {
6522 dev_err(&hdev->pdev->dev,
6523 "Delete fail, rule %u is inexistent\n", fs->location);
6524 spin_unlock_bh(&hdev->fd_rule_lock);
6528 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6533 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6536 spin_unlock_bh(&hdev->fd_rule_lock);
6540 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6543 struct hclge_fd_rule *rule;
6544 struct hlist_node *node;
6547 spin_lock_bh(&hdev->fd_rule_lock);
6549 for_each_set_bit(location, hdev->fd_bmap,
6550 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6551 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6555 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6557 hlist_del(&rule->rule_node);
6560 hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6561 hdev->hclge_fd_rule_num = 0;
6562 bitmap_zero(hdev->fd_bmap,
6563 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6566 spin_unlock_bh(&hdev->fd_rule_lock);
6569 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6571 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6574 hclge_clear_fd_rules_in_list(hdev, true);
6575 hclge_fd_disable_user_def(hdev);
6578 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6580 struct hclge_vport *vport = hclge_get_vport(handle);
6581 struct hclge_dev *hdev = vport->back;
6582 struct hclge_fd_rule *rule;
6583 struct hlist_node *node;
6585 /* Return ok here, because reset error handling will check this
6586  * return value. If an error is returned here, the reset process
6587  * will fail.
6588  */
6589 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6592 /* if fd is disabled, the rules should not be restored during reset */
6596 spin_lock_bh(&hdev->fd_rule_lock);
6597 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6598 if (rule->state == HCLGE_FD_ACTIVE)
6599 rule->state = HCLGE_FD_TO_ADD;
6601 spin_unlock_bh(&hdev->fd_rule_lock);
6602 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
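6603 /* Nothing is written to hardware here: marking rules TO_ADD and
6604  * setting FD_TBL_CHANGED lets the periodic service task replay them
6605  * through hclge_sync_fd_table() once the reset completes.
6606  */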
6607 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6608 struct ethtool_rxnfc *cmd)
6610 struct hclge_vport *vport = hclge_get_vport(handle);
6611 struct hclge_dev *hdev = vport->back;
6613 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
6616 cmd->rule_cnt = hdev->hclge_fd_rule_num;
6617 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6622 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6623 struct ethtool_tcpip4_spec *spec,
6624 struct ethtool_tcpip4_spec *spec_mask)
6626 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6627 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6628 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6630 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6631 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6632 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6634 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6635 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6636 0 : cpu_to_be16(rule->tuples_mask.src_port);
6638 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6639 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6640 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6642 spec->tos = rule->tuples.ip_tos;
6643 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6644 0 : rule->tuples_mask.ip_tos;
6647 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6648 struct ethtool_usrip4_spec *spec,
6649 struct ethtool_usrip4_spec *spec_mask)
6651 spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6652 spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6653 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6655 spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6656 spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6657 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6659 spec->tos = rule->tuples.ip_tos;
6660 spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6661 0 : rule->tuples_mask.ip_tos;
6663 spec->proto = rule->tuples.ip_proto;
6664 spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6665 0 : rule->tuples_mask.ip_proto;
6667 spec->ip_ver = ETH_RX_NFC_IP4;
6670 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6671 struct ethtool_tcpip6_spec *spec,
6672 struct ethtool_tcpip6_spec *spec_mask)
6674 cpu_to_be32_array(spec->ip6src,
6675 rule->tuples.src_ip, IPV6_SIZE);
6676 cpu_to_be32_array(spec->ip6dst,
6677 rule->tuples.dst_ip, IPV6_SIZE);
6678 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6679 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6681 cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6684 if (rule->unused_tuple & BIT(INNER_DST_IP))
6685 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6687 cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6690 spec->tclass = rule->tuples.ip_tos;
6691 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6692 0 : rule->tuples_mask.ip_tos;
6694 spec->psrc = cpu_to_be16(rule->tuples.src_port);
6695 spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6696 0 : cpu_to_be16(rule->tuples_mask.src_port);
6698 spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6699 spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6700 0 : cpu_to_be16(rule->tuples_mask.dst_port);
6703 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6704 struct ethtool_usrip6_spec *spec,
6705 struct ethtool_usrip6_spec *spec_mask)
6707 cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6708 cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6709 if (rule->unused_tuple & BIT(INNER_SRC_IP))
6710 memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6712 cpu_to_be32_array(spec_mask->ip6src,
6713 rule->tuples_mask.src_ip, IPV6_SIZE);
6715 if (rule->unused_tuple & BIT(INNER_DST_IP))
6716 memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6718 cpu_to_be32_array(spec_mask->ip6dst,
6719 rule->tuples_mask.dst_ip, IPV6_SIZE);
6721 spec->tclass = rule->tuples.ip_tos;
6722 spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6723 0 : rule->tuples_mask.ip_tos;
6725 spec->l4_proto = rule->tuples.ip_proto;
6726 spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6727 0 : rule->tuples_mask.ip_proto;
6730 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6731 struct ethhdr *spec,
6732 struct ethhdr *spec_mask)
6734 ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6735 ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6737 if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6738 eth_zero_addr(spec_mask->h_source);
6740 ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6742 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6743 eth_zero_addr(spec_mask->h_dest);
6745 ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6747 spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6748 spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6749 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6752 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6753 struct hclge_fd_rule *rule)
6755 if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6756 HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6757 fs->h_ext.data[0] = 0;
6758 fs->h_ext.data[1] = 0;
6759 fs->m_ext.data[0] = 0;
6760 fs->m_ext.data[1] = 0;
6762 fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6763 fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6765 fs->m_ext.data[0] = cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6766 fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6770 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6771 struct hclge_fd_rule *rule)
6773 if (fs->flow_type & FLOW_EXT) {
6774 fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6775 fs->m_ext.vlan_tci =
6776 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6777 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6779 hclge_fd_get_user_def_info(fs, rule);
6782 if (fs->flow_type & FLOW_MAC_EXT) {
6783 ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6784 if (rule->unused_tuple & BIT(INNER_DST_MAC))
6785 eth_zero_addr(fs->m_u.ether_spec.h_dest);
6787 ether_addr_copy(fs->m_u.ether_spec.h_dest,
6788 rule->tuples_mask.dst_mac);
6792 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6795 struct hclge_fd_rule *rule = NULL;
6796 struct hlist_node *node2;
6798 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6799 if (rule->location == location)
6801 else if (rule->location > location)
6808 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
6809 struct hclge_fd_rule *rule)
6811 if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6812 fs->ring_cookie = RX_CLS_FLOW_DISC;
6816 fs->ring_cookie = rule->queue_id;
6817 vf_id = rule->vf_id;
6818 vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6819 fs->ring_cookie |= vf_id;
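6820 /* Inverse of hclge_fd_parse_ring_cookie(): re-encode the queue id in
6821  * the low bits and the vf id above ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF.
6822  */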
6823 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6824 struct ethtool_rxnfc *cmd)
6826 struct hclge_vport *vport = hclge_get_vport(handle);
6827 struct hclge_fd_rule *rule = NULL;
6828 struct hclge_dev *hdev = vport->back;
6829 struct ethtool_rx_flow_spec *fs;
6831 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6834 fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6836 spin_lock_bh(&hdev->fd_rule_lock);
6838 rule = hclge_get_fd_rule(hdev, fs->location);
6840 spin_unlock_bh(&hdev->fd_rule_lock);
6844 fs->flow_type = rule->flow_type;
6845 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6849 hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6850 &fs->m_u.tcp_ip4_spec);
6853 hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6854 &fs->m_u.usr_ip4_spec);
6859 hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6860 &fs->m_u.tcp_ip6_spec);
6862 case IPV6_USER_FLOW:
6863 hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6864 &fs->m_u.usr_ip6_spec);
6866 /* The flow type of the fd rule was checked before it was added to
6867  * the rule list. As all other flow types have been handled above,
6868  * the default case must be ETHER_FLOW.
6871 hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6872 &fs->m_u.ether_spec);
6876 hclge_fd_get_ext_info(fs, rule);
6878 hclge_fd_get_ring_cookie(fs, rule);
6880 spin_unlock_bh(&hdev->fd_rule_lock);
6885 static int hclge_get_all_rules(struct hnae3_handle *handle,
6886 struct ethtool_rxnfc *cmd, u32 *rule_locs)
6888 struct hclge_vport *vport = hclge_get_vport(handle);
6889 struct hclge_dev *hdev = vport->back;
6890 struct hclge_fd_rule *rule;
6891 struct hlist_node *node2;
6894 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6897 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6899 spin_lock_bh(&hdev->fd_rule_lock);
6900 hlist_for_each_entry_safe(rule, node2,
6901 &hdev->fd_rule_list, rule_node) {
6902 if (cnt == cmd->rule_cnt) {
6903 spin_unlock_bh(&hdev->fd_rule_lock);
6907 if (rule->state == HCLGE_FD_TO_DEL)
6910 rule_locs[cnt] = rule->location;
6914 spin_unlock_bh(&hdev->fd_rule_lock);
6916 cmd->rule_cnt = cnt;
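6917 /* Serves ETHTOOL_GRXCLSRLALL: userspace fetches the location list
6918  * here and then queries each rule with ETHTOOL_GRXCLSRULE.
6919  */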
6921 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6922 struct hclge_fd_rule_tuples *tuples)
6924 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6925 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6927 tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6928 tuples->ip_proto = fkeys->basic.ip_proto;
6929 tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6931 if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6932 tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6933 tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
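6934 /* For IPv4 only the last word of the IPv6-sized array is used,
6935  * keeping the address right-aligned (index 3 == IPV4_INDEX).
6936  */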
6937 for (i = 0; i < IPV6_SIZE; i++) {
6938 tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6939 tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6944 /* traverse all rules, check whether an existing rule has the same tuples */
6945 static struct hclge_fd_rule *
6946 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6947 const struct hclge_fd_rule_tuples *tuples)
6949 struct hclge_fd_rule *rule = NULL;
6950 struct hlist_node *node;
6952 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6953 if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6960 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6961 struct hclge_fd_rule *rule)
6963 rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6964 BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6965 BIT(INNER_SRC_PORT);
6968 rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6969 rule->state = HCLGE_FD_TO_ADD;
6970 if (tuples->ether_proto == ETH_P_IP) {
6971 if (tuples->ip_proto == IPPROTO_TCP)
6972 rule->flow_type = TCP_V4_FLOW;
6974 rule->flow_type = UDP_V4_FLOW;
6976 if (tuples->ip_proto == IPPROTO_TCP)
6977 rule->flow_type = TCP_V6_FLOW;
6979 rule->flow_type = UDP_V6_FLOW;
6981 memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6982 memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
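6983 /* aRFS steers one exact flow, so every tuple not flagged in
6984  * unused_tuple is matched against an all-ones mask. */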
6985 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6986 u16 flow_id, struct flow_keys *fkeys)
6988 struct hclge_vport *vport = hclge_get_vport(handle);
6989 struct hclge_fd_rule_tuples new_tuples = {};
6990 struct hclge_dev *hdev = vport->back;
6991 struct hclge_fd_rule *rule;
6994 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
6997 /* when an fd rule added by the user already exists,
6998  * arfs should not work
7000 spin_lock_bh(&hdev->fd_rule_lock);
7001 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
7002 hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
7003 spin_unlock_bh(&hdev->fd_rule_lock);
7007 hclge_fd_get_flow_tuples(fkeys, &new_tuples);
7009 /* check whether a flow director filter already exists for this flow:
7010  * if not, create a new filter for it;
7011  * if a filter exists with a different queue id, modify the filter;
7012  * if a filter exists with the same queue id, do nothing
7014 rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
7016 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
7017 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7018 spin_unlock_bh(&hdev->fd_rule_lock);
7022 rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
7024 spin_unlock_bh(&hdev->fd_rule_lock);
7028 rule->location = bit_id;
7029 rule->arfs.flow_id = flow_id;
7030 rule->queue_id = queue_id;
7031 hclge_fd_build_arfs_rule(&new_tuples, rule);
7032 hclge_update_fd_list(hdev, rule->state, rule->location, rule);
7033 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
7034 } else if (rule->queue_id != queue_id) {
7035 rule->queue_id = queue_id;
7036 rule->state = HCLGE_FD_TO_ADD;
7037 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7038 hclge_task_schedule(hdev, 0);
7040 spin_unlock_bh(&hdev->fd_rule_lock);
7041 return rule->location;
7044 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
7046 #ifdef CONFIG_RFS_ACCEL
7047 struct hnae3_handle *handle = &hdev->vport[0].nic;
7048 struct hclge_fd_rule *rule;
7049 struct hlist_node *node;
7051 spin_lock_bh(&hdev->fd_rule_lock);
7052 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
7053 spin_unlock_bh(&hdev->fd_rule_lock);
7056 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7057 if (rule->state != HCLGE_FD_ACTIVE)
7059 if (rps_may_expire_flow(handle->netdev, rule->queue_id,
7060 rule->arfs.flow_id, rule->location)) {
7061 rule->state = HCLGE_FD_TO_DEL;
7062 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7065 spin_unlock_bh(&hdev->fd_rule_lock);
7069 /* must be called with fd_rule_lock held */
7070 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
7072 #ifdef CONFIG_RFS_ACCEL
7073 struct hclge_fd_rule *rule;
7074 struct hlist_node *node;
7077 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
7080 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7081 switch (rule->state) {
7082 case HCLGE_FD_TO_DEL:
7083 case HCLGE_FD_ACTIVE:
7084 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7085 rule->location, NULL, false);
7089 case HCLGE_FD_TO_ADD:
7090 hclge_fd_dec_rule_cnt(hdev, rule->location);
7091 hlist_del(&rule->rule_node);
7098 hclge_sync_fd_state(hdev);
7104 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
7105 struct hclge_fd_rule *rule)
7107 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
7108 struct flow_match_basic match;
7109 u16 ethtype_key, ethtype_mask;
7111 flow_rule_match_basic(flow, &match);
7112 ethtype_key = ntohs(match.key->n_proto);
7113 ethtype_mask = ntohs(match.mask->n_proto);
7115 if (ethtype_key == ETH_P_ALL) {
7119 rule->tuples.ether_proto = ethtype_key;
7120 rule->tuples_mask.ether_proto = ethtype_mask;
7121 rule->tuples.ip_proto = match.key->ip_proto;
7122 rule->tuples_mask.ip_proto = match.mask->ip_proto;
7124 rule->unused_tuple |= BIT(INNER_IP_PROTO);
7125 rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7129 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7130 struct hclge_fd_rule *rule)
7132 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7133 struct flow_match_eth_addrs match;
7135 flow_rule_match_eth_addrs(flow, &match);
7136 ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7137 ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7138 ether_addr_copy(rule->tuples.src_mac, match.key->src);
7139 ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7141 rule->unused_tuple |= BIT(INNER_DST_MAC);
7142 rule->unused_tuple |= BIT(INNER_SRC_MAC);
7146 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7147 struct hclge_fd_rule *rule)
7149 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7150 struct flow_match_vlan match;
7152 flow_rule_match_vlan(flow, &match);
7153 rule->tuples.vlan_tag1 = match.key->vlan_id |
7154 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7155 rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7156 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7158 rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7162 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7163 struct hclge_fd_rule *rule)
7167 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7168 struct flow_match_control match;
7170 flow_rule_match_control(flow, &match);
7171 addr_type = match.key->addr_type;
7174 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7175 struct flow_match_ipv4_addrs match;
7177 flow_rule_match_ipv4_addrs(flow, &match);
7178 rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7179 rule->tuples_mask.src_ip[IPV4_INDEX] =
7180 be32_to_cpu(match.mask->src);
7181 rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7182 rule->tuples_mask.dst_ip[IPV4_INDEX] =
7183 be32_to_cpu(match.mask->dst);
7184 } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7185 struct flow_match_ipv6_addrs match;
7187 flow_rule_match_ipv6_addrs(flow, &match);
7188 be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7190 be32_to_cpu_array(rule->tuples_mask.src_ip,
7191 match.mask->src.s6_addr32, IPV6_SIZE);
7192 be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7194 be32_to_cpu_array(rule->tuples_mask.dst_ip,
7195 match.mask->dst.s6_addr32, IPV6_SIZE);
7197 rule->unused_tuple |= BIT(INNER_SRC_IP);
7198 rule->unused_tuple |= BIT(INNER_DST_IP);
7202 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7203 struct hclge_fd_rule *rule)
7205 if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7206 struct flow_match_ports match;
7208 flow_rule_match_ports(flow, &match);
7210 rule->tuples.src_port = be16_to_cpu(match.key->src);
7211 rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7212 rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7213 rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7215 rule->unused_tuple |= BIT(INNER_SRC_PORT);
7216 rule->unused_tuple |= BIT(INNER_DST_PORT);
7220 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7221 struct flow_cls_offload *cls_flower,
7222 struct hclge_fd_rule *rule)
7224 struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7225 struct flow_dissector *dissector = flow->match.dissector;
7227 if (dissector->used_keys &
7228 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
7229 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
7230 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7231 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
7232 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7233 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7234 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
7235 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n",
7236 dissector->used_keys);
7240 hclge_get_cls_key_basic(flow, rule);
7241 hclge_get_cls_key_mac(flow, rule);
7242 hclge_get_cls_key_vlan(flow, rule);
7243 hclge_get_cls_key_ip(flow, rule);
7244 hclge_get_cls_key_port(flow, rule);
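7245 /* Keys absent from the flower match leave tuples_mask zeroed and the
7246  * unused_tuple bits set, so those fields are wildcarded in hardware.
7247  */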
7249 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7250 struct flow_cls_offload *cls_flower, int tc)
7252 u32 prio = cls_flower->common.prio;
7254 if (tc < 0 || tc > hdev->tc_max) {
7255 dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7260 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7261 dev_err(&hdev->pdev->dev,
7262 "prio %u should be in range[1, %u]\n",
7263 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7267 if (test_bit(prio - 1, hdev->fd_bmap)) {
7268 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
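7269 /* tc-flower priority maps directly to a TCAM location (prio - 1 is
7270  * used as rule->location below), hence each prio may be used only once.
7271  */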
7274 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7275 struct flow_cls_offload *cls_flower,
7278 struct hclge_vport *vport = hclge_get_vport(handle);
7279 struct hclge_dev *hdev = vport->back;
7280 struct hclge_fd_rule *rule;
7283 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
7284 dev_err(&hdev->pdev->dev,
7285 "cls flower is not supported\n");
7289 ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7291 dev_err(&hdev->pdev->dev,
7292 "failed to check cls flower params, ret = %d\n", ret);
7296 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7300 ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7306 rule->action = HCLGE_FD_ACTION_SELECT_TC;
7307 rule->cls_flower.tc = tc;
7308 rule->location = cls_flower->common.prio - 1;
7310 rule->cls_flower.cookie = cls_flower->cookie;
7311 rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7313 ret = hclge_add_fd_entry_common(hdev, rule);
7320 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7321 unsigned long cookie)
7323 struct hclge_fd_rule *rule;
7324 struct hlist_node *node;
7326 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7327 if (rule->cls_flower.cookie == cookie)
7334 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7335 struct flow_cls_offload *cls_flower)
7337 struct hclge_vport *vport = hclge_get_vport(handle);
7338 struct hclge_dev *hdev = vport->back;
7339 struct hclge_fd_rule *rule;
7342 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7345 spin_lock_bh(&hdev->fd_rule_lock);
7347 rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7349 spin_unlock_bh(&hdev->fd_rule_lock);
7353 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7356 /* if the tcam config fails, set the rule state to TO_DEL,
7357  * so the rule will be deleted when the periodic
7358  * task is scheduled.
7360 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
7361 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7362 spin_unlock_bh(&hdev->fd_rule_lock);
7366 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7367 spin_unlock_bh(&hdev->fd_rule_lock);
7372 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7374 struct hclge_fd_rule *rule;
7375 struct hlist_node *node;
7378 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7381 spin_lock_bh(&hdev->fd_rule_lock);
7383 hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7384 switch (rule->state) {
7385 case HCLGE_FD_TO_ADD:
7386 ret = hclge_fd_config_rule(hdev, rule);
7389 rule->state = HCLGE_FD_ACTIVE;
7391 case HCLGE_FD_TO_DEL:
7392 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7393 rule->location, NULL, false);
7396 hclge_fd_dec_rule_cnt(hdev, rule->location);
7397 hclge_fd_free_node(hdev, rule);
7406 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7408 spin_unlock_bh(&hdev->fd_rule_lock);
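7409 /* On any hardware failure above, FD_TBL_CHANGED is set again so the
7410  * next service task run retries the remaining rules. */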
7411 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7413 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
7416 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7417 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7419 hclge_clear_fd_rules_in_list(hdev, clear_list);
7422 hclge_sync_fd_user_def_cfg(hdev, false);
7424 hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7427 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7429 struct hclge_vport *vport = hclge_get_vport(handle);
7430 struct hclge_dev *hdev = vport->back;
7432 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7433 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7436 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7438 struct hclge_vport *vport = hclge_get_vport(handle);
7439 struct hclge_dev *hdev = vport->back;
7441 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7444 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7446 struct hclge_vport *vport = hclge_get_vport(handle);
7447 struct hclge_dev *hdev = vport->back;
7449 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7452 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7454 struct hclge_vport *vport = hclge_get_vport(handle);
7455 struct hclge_dev *hdev = vport->back;
7457 return hdev->rst_stats.hw_reset_done_cnt;
7460 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7462 struct hclge_vport *vport = hclge_get_vport(handle);
7463 struct hclge_dev *hdev = vport->back;
7465 hdev->fd_en = enable;
7468 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7470 hclge_restore_fd_entries(handle);
7472 hclge_task_schedule(hdev, 0);
7475 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7477 #define HCLGE_LINK_STATUS_WAIT_CNT 3
7479 struct hclge_desc desc;
7480 struct hclge_config_mac_mode_cmd *req =
7481 (struct hclge_config_mac_mode_cmd *)desc.data;
7485 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7488 hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7489 hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7490 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7491 hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7492 hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7493 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7494 hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7495 hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7496 hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7497 hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7500 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7502 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7504 dev_err(&hdev->pdev->dev,
7505 "mac enable fail, ret =%d.\n", ret);
7510 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
7511 HCLGE_LINK_STATUS_WAIT_CNT);
7514 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7515 u8 switch_param, u8 param_mask)
7517 struct hclge_mac_vlan_switch_cmd *req;
7518 struct hclge_desc desc;
7522 func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7523 req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7525 /* read current config parameter */
7526 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7528 req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7529 req->func_id = cpu_to_le32(func_id);
7531 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7533 dev_err(&hdev->pdev->dev,
7534 "read mac vlan switch parameter fail, ret = %d\n", ret);
7538 /* modify and write new config parameter */
7539 hclge_comm_cmd_reuse_desc(&desc, false);
7540 req->switch_param = (req->switch_param & param_mask) | switch_param;
7541 req->param_mask = param_mask;
7543 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7545 dev_err(&hdev->pdev->dev,
7546 "set mac vlan switch parameter fail, ret = %d\n", ret);
7550 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7553 #define HCLGE_PHY_LINK_STATUS_NUM 200
7555 struct phy_device *phydev = hdev->hw.mac.phydev;
7560 ret = phy_read_status(phydev);
7562 dev_err(&hdev->pdev->dev,
7563 "phy update link status fail, ret = %d\n", ret);
7567 if (phydev->link == link_ret)
7570 msleep(HCLGE_LINK_STATUS_MS);
7571 } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7574 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
7582 ret = hclge_get_mac_link_status(hdev, &link_status);
7585 if (link_status == link_ret)
7588 msleep(HCLGE_LINK_STATUS_MS);
7589 } while (++i < wait_cnt);
7593 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7596 #define HCLGE_MAC_LINK_STATUS_NUM 100
7600 link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7603 hclge_phy_link_status_wait(hdev, link_ret);
7605 return hclge_mac_link_status_wait(hdev, link_ret,
7606 HCLGE_MAC_LINK_STATUS_NUM);
7609 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7611 struct hclge_config_mac_mode_cmd *req;
7612 struct hclge_desc desc;
7616 req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7617 /* 1 Read out the MAC mode config at first */
7618 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7619 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7621 dev_err(&hdev->pdev->dev,
7622 "mac loopback get fail, ret =%d.\n", ret);
7626 /* 2 Then setup the loopback flag */
7627 loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7628 hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7630 req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7632 /* 3 Config mac work mode with loopback flag
7633  * and its original configuration parameters
7635 hclge_comm_cmd_reuse_desc(&desc, false);
7636 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7638 dev_err(&hdev->pdev->dev,
7639 "mac loopback set fail, ret =%d.\n", ret);
7643 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7644 enum hnae3_loop loop_mode)
7646 struct hclge_common_lb_cmd *req;
7647 struct hclge_desc desc;
7651 req = (struct hclge_common_lb_cmd *)desc.data;
7652 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7654 switch (loop_mode) {
7655 case HNAE3_LOOP_SERIAL_SERDES:
7656 loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7658 case HNAE3_LOOP_PARALLEL_SERDES:
7659 loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7661 case HNAE3_LOOP_PHY:
7662 loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7665 dev_err(&hdev->pdev->dev,
7666 "unsupported loopback mode %d\n", loop_mode);
7670 req->mask = loop_mode_b;
7672 req->enable = loop_mode_b;
7674 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7676 dev_err(&hdev->pdev->dev,
7677 "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7683 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7685 #define HCLGE_COMMON_LB_RETRY_MS 10
7686 #define HCLGE_COMMON_LB_RETRY_NUM 100
7688 struct hclge_common_lb_cmd *req;
7689 struct hclge_desc desc;
7693 req = (struct hclge_common_lb_cmd *)desc.data;
7696 msleep(HCLGE_COMMON_LB_RETRY_MS);
7697 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7699 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7701 dev_err(&hdev->pdev->dev,
7702 "failed to get loopback done status, ret = %d\n",
7706 } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7707 !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7709 if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7710 dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7712 } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7713 dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7720 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7721 enum hnae3_loop loop_mode)
7725 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7729 return hclge_cfg_common_loopback_wait(hdev);
7732 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7733 enum hnae3_loop loop_mode)
7737 ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7741 hclge_cfg_mac_mode(hdev, en);
7743 ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7745 dev_err(&hdev->pdev->dev,
7746 "serdes loopback config mac mode timeout\n");
7751 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7752 struct phy_device *phydev)
7756 if (!phydev->suspended) {
7757 ret = phy_suspend(phydev);
7762 ret = phy_resume(phydev);
7766 return phy_loopback(phydev, true);
7769 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7770 struct phy_device *phydev)
7774 ret = phy_loopback(phydev, false);
7778 return phy_suspend(phydev);
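7779 /* Note the pairing: enable cycles the phy through suspend and resume
7780  * to a clean state before phy_loopback(true); disable suspends it again. */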
7781 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7783 struct phy_device *phydev = hdev->hw.mac.phydev;
7787 if (hnae3_dev_phy_imp_supported(hdev))
7788 return hclge_set_common_loopback(hdev, en,
7794 ret = hclge_enable_phy_loopback(hdev, phydev);
7796 ret = hclge_disable_phy_loopback(hdev, phydev);
7798 dev_err(&hdev->pdev->dev,
7799 "set phy loopback fail, ret = %d\n", ret);
7803 hclge_cfg_mac_mode(hdev, en);
7805 ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7807 dev_err(&hdev->pdev->dev,
7808 "phy loopback config mac mode timeout\n");
7813 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7814 u16 stream_id, bool enable)
7816 struct hclge_desc desc;
7817 struct hclge_cfg_com_tqp_queue_cmd *req =
7818 (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7820 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7821 req->tqp_id = cpu_to_le16(tqp_id);
7822 req->stream_id = cpu_to_le16(stream_id);
7824 req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7826 return hclge_cmd_send(&hdev->hw, &desc, 1);
7829 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7831 struct hclge_vport *vport = hclge_get_vport(handle);
7832 struct hclge_dev *hdev = vport->back;
7836 for (i = 0; i < handle->kinfo.num_tqps; i++) {
7837 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7844 static int hclge_set_loopback(struct hnae3_handle *handle,
7845 enum hnae3_loop loop_mode, bool en)
7847 struct hclge_vport *vport = hclge_get_vport(handle);
7848 struct hclge_dev *hdev = vport->back;
7851 /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7852 * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7853 * the same, the packets are looped back in the SSU. If SSU loopback
7854 * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7856 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7857 u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7859 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7860 HCLGE_SWITCH_ALW_LPBK_MASK);
7865 switch (loop_mode) {
7866 case HNAE3_LOOP_APP:
7867 ret = hclge_set_app_loopback(hdev, en);
7869 case HNAE3_LOOP_SERIAL_SERDES:
7870 case HNAE3_LOOP_PARALLEL_SERDES:
7871 ret = hclge_set_common_loopback(hdev, en, loop_mode);
7873 case HNAE3_LOOP_PHY:
7874 ret = hclge_set_phy_loopback(hdev, en);
7876 case HNAE3_LOOP_EXTERNAL:
7880 dev_err(&hdev->pdev->dev,
7881 "loop_mode %d is not supported\n", loop_mode);
7888 ret = hclge_tqp_enable(handle, en);
7890 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7891 en ? "enable" : "disable", ret);
7896 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7900 ret = hclge_set_app_loopback(hdev, false);
7904 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7908 return hclge_cfg_common_loopback(hdev, false,
7909 HNAE3_LOOP_PARALLEL_SERDES);
7912 static void hclge_flush_link_update(struct hclge_dev *hdev)
7914 #define HCLGE_FLUSH_LINK_TIMEOUT 100000
7916 unsigned long last = hdev->serv_processed_cnt;
7919 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7920 i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7921 last == hdev->serv_processed_cnt)
7925 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7927 struct hclge_vport *vport = hclge_get_vport(handle);
7928 struct hclge_dev *hdev = vport->back;
7931 hclge_task_schedule(hdev, 0);
7933 /* Set the DOWN flag here to disable link updating */
7934 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7936 /* flush memory to make sure DOWN is seen by service task */
7937 smp_mb__before_atomic();
7938 hclge_flush_link_update(hdev);
7942 static int hclge_ae_start(struct hnae3_handle *handle)
7944 struct hclge_vport *vport = hclge_get_vport(handle);
7945 struct hclge_dev *hdev = vport->back;
7948 hclge_cfg_mac_mode(hdev, true);
7949 clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7950 hdev->hw.mac.link = 0;
7952 /* reset tqp stats */
7953 hclge_comm_reset_tqp_stats(handle);
7955 hclge_mac_start_phy(hdev);
7960 static void hclge_ae_stop(struct hnae3_handle *handle)
7962 struct hclge_vport *vport = hclge_get_vport(handle);
7963 struct hclge_dev *hdev = vport->back;
7965 set_bit(HCLGE_STATE_DOWN, &hdev->state);
7966 spin_lock_bh(&hdev->fd_rule_lock);
7967 hclge_clear_arfs_rules(hdev);
7968 spin_unlock_bh(&hdev->fd_rule_lock);
7970 /* If it is not a PF reset or FLR, the firmware will disable the MAC,
7971  * so we only need to stop the phy here.
7973 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
7974 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
7976 if (hdev->reset_type != HNAE3_FUNC_RESET &&
7977 hdev->reset_type != HNAE3_FLR_RESET) {
7978 hclge_mac_stop_phy(hdev);
7979 hclge_update_link_status(hdev);
7984 hclge_reset_tqp(handle);
7986 hclge_config_mac_tnl_int(hdev, false);
7989 hclge_cfg_mac_mode(hdev, false);
7991 hclge_mac_stop_phy(hdev);
7993 /* reset tqp stats */
7994 hclge_comm_reset_tqp_stats(handle);
7995 hclge_update_link_status(hdev);
7998 int hclge_vport_start(struct hclge_vport *vport)
8000 struct hclge_dev *hdev = vport->back;
8002 set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8003 set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8004 set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
8005 vport->last_active_jiffies = jiffies;
8006 vport->need_notify = 0;
8008 if (test_bit(vport->vport_id, hdev->vport_config_block)) {
8009 if (vport->vport_id) {
8010 hclge_restore_mac_table_common(vport);
8011 hclge_restore_vport_vlan_table(vport);
8013 hclge_restore_hw_table(hdev);
8017 clear_bit(vport->vport_id, hdev->vport_config_block);
8022 void hclge_vport_stop(struct hclge_vport *vport)
8024 clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
8025 clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
8026 vport->need_notify = 0;
8029 static int hclge_client_start(struct hnae3_handle *handle)
8031 struct hclge_vport *vport = hclge_get_vport(handle);
8033 return hclge_vport_start(vport);
8036 static void hclge_client_stop(struct hnae3_handle *handle)
8038 struct hclge_vport *vport = hclge_get_vport(handle);
8040 hclge_vport_stop(vport);
8043 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
8044 u16 cmdq_resp, u8 resp_code,
8045 enum hclge_mac_vlan_tbl_opcode op)
8047 struct hclge_dev *hdev = vport->back;
8050 dev_err(&hdev->pdev->dev,
8051 "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
8056 if (op == HCLGE_MAC_VLAN_ADD) {
8057 if (!resp_code || resp_code == 1)
8059 else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
8060 resp_code == HCLGE_ADD_MC_OVERFLOW)
8063 dev_err(&hdev->pdev->dev,
8064 "add mac addr failed for undefined, code=%u.\n",
8067 } else if (op == HCLGE_MAC_VLAN_REMOVE) {
8070 } else if (resp_code == 1) {
8071 dev_dbg(&hdev->pdev->dev,
8072 "remove mac addr failed for miss.\n");
8076 dev_err(&hdev->pdev->dev,
8077 "remove mac addr failed for undefined, code=%u.\n",
8080 } else if (op == HCLGE_MAC_VLAN_LKUP) {
8083 } else if (resp_code == 1) {
8084 dev_dbg(&hdev->pdev->dev,
8085 "lookup mac addr failed for miss.\n");
8089 dev_err(&hdev->pdev->dev,
8090 "lookup mac addr failed for undefined, code=%u.\n",
8095 dev_err(&hdev->pdev->dev,
8096 "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
8101 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
8103 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
8105 unsigned int word_num;
8106 unsigned int bit_num;
8108 if (vfid > 255 || vfid < 0)
8111 if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
8112 word_num = vfid / 32;
8113 bit_num = vfid % 32;
8115 desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8117 desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
8119 word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
8120 bit_num = vfid % 32;
8122 desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
8124 desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
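8125 /* Worked example: vfid 200 lands in desc[2], since desc[1] holds the
8126  * first 192 function ids; word_num = (200 - 192) / 32 = 0 and
8127  * bit_num = 200 % 32 = 8.
8128  */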
8130 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
8132 #define HCLGE_DESC_NUMBER 3
8133 #define HCLGE_FUNC_NUMBER_PER_DESC 6
8136 for (i = 1; i < HCLGE_DESC_NUMBER; i++)
8137 for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
8138 if (desc[i].data[j])
8144 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
8145 const u8 *addr, bool is_mc)
8147 const unsigned char *mac_addr = addr;
8148 u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
8149 (mac_addr[0]) | (mac_addr[1] << 8);
8150 u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
8152 hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8154 hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8155 hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8158 new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8159 new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
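8160 /* e.g. 00:11:22:33:44:55 packs to mac_addr_hi32 = 0x33221100 and
8161  * mac_addr_lo16 = 0x5544 before the little-endian conversion. */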
8162 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8163 struct hclge_mac_vlan_tbl_entry_cmd *req)
8165 struct hclge_dev *hdev = vport->back;
8166 struct hclge_desc desc;
8171 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8173 memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8175 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8177 dev_err(&hdev->pdev->dev,
8178 "del mac addr failed for cmd_send, ret =%d.\n",
8182 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8183 retval = le16_to_cpu(desc.retval);
8185 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8186 HCLGE_MAC_VLAN_REMOVE);
8189 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8190 struct hclge_mac_vlan_tbl_entry_cmd *req,
8191 struct hclge_desc *desc,
8194 struct hclge_dev *hdev = vport->back;
8199 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8201 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8202 memcpy(desc[0].data,
8204 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8205 hclge_cmd_setup_basic_desc(&desc[1],
8206 HCLGE_OPC_MAC_VLAN_ADD,
8208 desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8209 hclge_cmd_setup_basic_desc(&desc[2],
8210 HCLGE_OPC_MAC_VLAN_ADD,
8212 ret = hclge_cmd_send(&hdev->hw, desc, 3);
8214 memcpy(desc[0].data,
8216 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8217 ret = hclge_cmd_send(&hdev->hw, desc, 1);
8220 dev_err(&hdev->pdev->dev,
8221 "lookup mac addr failed for cmd_send, ret =%d.\n",
8225 resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8226 retval = le16_to_cpu(desc[0].retval);
8228 return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8229 HCLGE_MAC_VLAN_LKUP);
8232 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8233 struct hclge_mac_vlan_tbl_entry_cmd *req,
8234 struct hclge_desc *mc_desc)
8236 struct hclge_dev *hdev = vport->back;
8243 struct hclge_desc desc;
8245 hclge_cmd_setup_basic_desc(&desc,
8246 HCLGE_OPC_MAC_VLAN_ADD,
8248 memcpy(desc.data, req,
8249 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8250 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8251 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8252 retval = le16_to_cpu(desc.retval);
8254 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8256 HCLGE_MAC_VLAN_ADD);
8258 hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
8259 mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8260 hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
8261 mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8262 hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
8263 mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
8264 memcpy(mc_desc[0].data, req,
8265 sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8266 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8267 resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8268 retval = le16_to_cpu(mc_desc[0].retval);
8270 cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8272 HCLGE_MAC_VLAN_ADD);
8276 dev_err(&hdev->pdev->dev,
8277 "add mac addr failed for cmd_send, ret =%d.\n",
8285 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8286 u16 *allocated_size)
8288 struct hclge_umv_spc_alc_cmd *req;
8289 struct hclge_desc desc;
8292 req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8293 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8295 req->space_size = cpu_to_le32(space_size);
8297 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8299 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8304 *allocated_size = le32_to_cpu(desc.data[1]);
8309 static int hclge_init_umv_space(struct hclge_dev *hdev)
8311 u16 allocated_size = 0;
8314 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8318 if (allocated_size < hdev->wanted_umv_size)
8319 dev_warn(&hdev->pdev->dev,
8320 "failed to alloc umv space, want %u, get %u\n",
8321 hdev->wanted_umv_size, allocated_size);
8323 hdev->max_umv_size = allocated_size;
8324 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8325 hdev->share_umv_size = hdev->priv_umv_size +
8326 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8328 if (hdev->ae_dev->dev_specs.mc_mac_size)
8329 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
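8330 /* Worked example: with max_umv_size 256 and num_alloc_vport 3, each
8331  * vport keeps 256 / 4 = 64 private entries and the shared pool gets
8332  * the extra chunk plus the remainder, 64 + 256 % 4 = 64 entries.
8333  */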
8334 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8336 struct hclge_vport *vport;
8339 for (i = 0; i < hdev->num_alloc_vport; i++) {
8340 vport = &hdev->vport[i];
8341 vport->used_umv_num = 0;
8344 mutex_lock(&hdev->vport_lock);
8345 hdev->share_umv_size = hdev->priv_umv_size +
8346 hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8347 mutex_unlock(&hdev->vport_lock);
8349 hdev->used_mc_mac_num = 0;
8352 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8354 struct hclge_dev *hdev = vport->back;
8358 mutex_lock(&hdev->vport_lock);
8360 is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8361 hdev->share_umv_size == 0);
8364 mutex_unlock(&hdev->vport_lock);
8369 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8371 struct hclge_dev *hdev = vport->back;
8374 if (vport->used_umv_num > hdev->priv_umv_size)
8375 hdev->share_umv_size++;
8377 if (vport->used_umv_num > 0)
8378 vport->used_umv_num--;
8380 if (vport->used_umv_num >= hdev->priv_umv_size &&
8381 hdev->share_umv_size > 0)
8382 hdev->share_umv_size--;
8383 vport->used_umv_num++;
8387 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8390 struct hclge_mac_node *mac_node, *tmp;
8392 list_for_each_entry_safe(mac_node, tmp, list, node)
8393 if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8399 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8400 enum HCLGE_MAC_NODE_STATE state)
8403 /* from set_rx_mode or tmp_add_list */
8404 case HCLGE_MAC_TO_ADD:
8405 if (mac_node->state == HCLGE_MAC_TO_DEL)
8406 mac_node->state = HCLGE_MAC_ACTIVE;
8408 /* only from set_rx_mode */
8409 case HCLGE_MAC_TO_DEL:
8410 if (mac_node->state == HCLGE_MAC_TO_ADD) {
8411 list_del(&mac_node->node);
8414 mac_node->state = HCLGE_MAC_TO_DEL;
8417 /* only from tmp_add_list, the mac_node->state won't be
8418  * HCLGE_MAC_ACTIVE here.
8419  */
8420 case HCLGE_MAC_ACTIVE:
8421 if (mac_node->state == HCLGE_MAC_TO_ADD)
8422 mac_node->state = HCLGE_MAC_ACTIVE;
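8423 /* Resulting transitions: TO_ADD over a TO_DEL node -> ACTIVE; TO_DEL
8424  * over a TO_ADD node frees the node; ACTIVE over a TO_ADD node ->
8425  * ACTIVE (the hardware write has completed).
8426  */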
8428 int hclge_update_mac_list(struct hclge_vport *vport,
8429 enum HCLGE_MAC_NODE_STATE state,
8430 enum HCLGE_MAC_ADDR_TYPE mac_type,
8431 const unsigned char *addr)
8433 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8434 struct hclge_dev *hdev = vport->back;
8435 struct hclge_mac_node *mac_node;
8436 struct list_head *list;
8438 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8439 &vport->uc_mac_list : &vport->mc_mac_list;
8441 spin_lock_bh(&vport->mac_list_lock);
8443 /* if the mac addr is already in the mac list, there is no need to add
8444  * a new node; just check the mac addr state and convert it to a new
8445  * state, remove it, or do nothing.
8447 mac_node = hclge_find_mac_node(list, addr);
8449 hclge_update_mac_node(mac_node, state);
8450 spin_unlock_bh(&vport->mac_list_lock);
8451 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8455 /* if this address was never added, there is nothing to delete */
8456 if (state == HCLGE_MAC_TO_DEL) {
8457 spin_unlock_bh(&vport->mac_list_lock);
8458 hnae3_format_mac_addr(format_mac_addr, addr);
8459 dev_err(&hdev->pdev->dev,
8460 "failed to delete address %s from mac list\n",
8465 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8467 spin_unlock_bh(&vport->mac_list_lock);
8471 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8473 mac_node->state = state;
8474 ether_addr_copy(mac_node->mac_addr, addr);
8475 list_add_tail(&mac_node->node, list);
8477 spin_unlock_bh(&vport->mac_list_lock);
8482 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8483 const unsigned char *addr)
8485 struct hclge_vport *vport = hclge_get_vport(handle);
8487 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8491 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8492 const unsigned char *addr)
8494 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8495 struct hclge_dev *hdev = vport->back;
8496 struct hclge_mac_vlan_tbl_entry_cmd req;
8497 struct hclge_desc desc;
8498 u16 egress_port = 0;
8501 /* mac addr check */
8502 if (is_zero_ether_addr(addr) ||
8503 is_broadcast_ether_addr(addr) ||
8504 is_multicast_ether_addr(addr)) {
8505 hnae3_format_mac_addr(format_mac_addr, addr);
8506 dev_err(&hdev->pdev->dev,
8507 "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8508 format_mac_addr, is_zero_ether_addr(addr),
8509 is_broadcast_ether_addr(addr),
8510 is_multicast_ether_addr(addr));
8514 memset(&req, 0, sizeof(req));
8516 hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8517 HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8519 req.egress_port = cpu_to_le16(egress_port);
8521 hclge_prepare_mac_addr(&req, addr, false);
8523 /* Look up the mac address in the mac_vlan table, and add
8524  * it if the entry does not exist. Duplicate unicast entries
8525  * are not allowed in the mac vlan table.
8527 ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8528 if (ret == -ENOENT) {
8529 mutex_lock(&hdev->vport_lock);
8530 if (!hclge_is_umv_space_full(vport, false)) {
8531 ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8533 hclge_update_umv_space(vport, false);
8534 mutex_unlock(&hdev->vport_lock);
8537 mutex_unlock(&hdev->vport_lock);
8539 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8540 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8541 hdev->priv_umv_size);
8546 /* check if we just hit a duplicate entry */
8553 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8554 const unsigned char *addr)
8556 struct hclge_vport *vport = hclge_get_vport(handle);
8558 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8562 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8563 const unsigned char *addr)
8565 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8566 struct hclge_dev *hdev = vport->back;
8567 struct hclge_mac_vlan_tbl_entry_cmd req;
8570 /* mac addr check */
8571 if (is_zero_ether_addr(addr) ||
8572 is_broadcast_ether_addr(addr) ||
8573 is_multicast_ether_addr(addr)) {
8574 hnae3_format_mac_addr(format_mac_addr, addr);
8575 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8580 memset(&req, 0, sizeof(req));
8581 hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8582 hclge_prepare_mac_addr(&req, addr, false);
8583 ret = hclge_remove_mac_vlan_tbl(vport, &req);
8584 if (!ret || ret == -ENOENT) {
8585 mutex_lock(&hdev->vport_lock);
8586 hclge_update_umv_space(vport, true);
8587 mutex_unlock(&hdev->vport_lock);
8594 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8595 const unsigned char *addr)
8597 struct hclge_vport *vport = hclge_get_vport(handle);
8599 return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8603 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8604 const unsigned char *addr)
8606 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8607 struct hclge_dev *hdev = vport->back;
8608 struct hclge_mac_vlan_tbl_entry_cmd req;
8609 struct hclge_desc desc[3];
8610 bool is_new_addr = false;
8613 /* mac addr check */
8614 if (!is_multicast_ether_addr(addr)) {
8615 hnae3_format_mac_addr(format_mac_addr, addr);
8616 dev_err(&hdev->pdev->dev,
8617 "Add mc mac err! invalid mac:%s.\n",
8621 memset(&req, 0, sizeof(req));
8622 hclge_prepare_mac_addr(&req, addr, true);
8623 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8625 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8626 hdev->used_mc_mac_num >=
8627 hdev->ae_dev->dev_specs.mc_mac_size)
8632 /* This mac addr does not exist, add a new entry for it */
8633 memset(desc[0].data, 0, sizeof(desc[0].data));
8634 memset(desc[1].data, 0, sizeof(desc[0].data));
8635 memset(desc[2].data, 0, sizeof(desc[0].data));
8637 status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8640 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8641 if (status == -ENOSPC)
8643 else if (!status && is_new_addr)
8644 hdev->used_mc_mac_num++;
8649 /* if already overflowed, do not print it each time */
8650 if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
8651 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8652 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8658 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8659 const unsigned char *addr)
8661 struct hclge_vport *vport = hclge_get_vport(handle);
8663 return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8667 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8668 const unsigned char *addr)
8670 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8671 struct hclge_dev *hdev = vport->back;
8672 struct hclge_mac_vlan_tbl_entry_cmd req;
8673 enum hclge_comm_cmd_status status;
8674 struct hclge_desc desc[3];
8676 /* mac addr check */
8677 if (!is_multicast_ether_addr(addr)) {
8678 hnae3_format_mac_addr(format_mac_addr, addr);
8679 dev_dbg(&hdev->pdev->dev,
8680 "Remove mc mac err! invalid mac:%s.\n",
8685 memset(&req, 0, sizeof(req));
8686 hclge_prepare_mac_addr(&req, addr, true);
8687 status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8689 /* This mac addr exists, remove this handle's VFID from it */
8690 status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8694 if (hclge_is_all_function_id_zero(desc)) {
8695 /* All the vfids are zero, so this entry needs to be deleted */
8696 status = hclge_remove_mac_vlan_tbl(vport, &req);
8698 hdev->used_mc_mac_num--;
8700 /* Not all the vfids are zero, so just update the vfid bitmap */
8701 status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8703 } else if (status == -ENOENT) {
8710 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8711 struct list_head *list,
8712 enum HCLGE_MAC_ADDR_TYPE mac_type)
8714 int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
8715 struct hclge_mac_node *mac_node, *tmp;
8718 if (mac_type == HCLGE_MAC_ADDR_UC)
8719 sync = hclge_add_uc_addr_common;
8721 sync = hclge_add_mc_addr_common;
8723 list_for_each_entry_safe(mac_node, tmp, list, node) {
8724 ret = sync(vport, mac_node->mac_addr);
8726 mac_node->state = HCLGE_MAC_ACTIVE;
8728 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8731 /* If one unicast mac address already exists in hardware,
8732 * we still need to check whether the other unicast mac
8733 * addresses are new ones that can be added.
8734 * Multicast mac addresses are reusable: even if there is
8735 * no space to add a new multicast mac address, the other
8736 * addresses may already exist in hardware and can be
8737 * reused.
8739 if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
8740 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
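/* hclge_unsync_vport_mac_list() is the removal-side twin of the sync
 * helper above: it walks a temporary delete list and, on success (or
 * -ENOENT, i.e. the entry is already gone from hardware), frees the node
 * and marks the vport's table as changed.
 */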
8746 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8747 struct list_head *list,
8748 enum HCLGE_MAC_ADDR_TYPE mac_type)
8750 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8751 struct hclge_mac_node *mac_node, *tmp;
8754 if (mac_type == HCLGE_MAC_ADDR_UC)
8755 unsync = hclge_rm_uc_addr_common;
8757 unsync = hclge_rm_mc_addr_common;
8759 list_for_each_entry_safe(mac_node, tmp, list, node) {
8760 ret = unsync(vport, mac_node->mac_addr);
8761 if (!ret || ret == -ENOENT) {
8762 list_del(&mac_node->node);
8765 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8772 static bool hclge_sync_from_add_list(struct list_head *add_list,
8773 struct list_head *mac_list)
8775 struct hclge_mac_node *mac_node, *tmp, *new_node;
8776 bool all_added = true;
8778 list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8779 if (mac_node->state == HCLGE_MAC_TO_ADD)
8782 /* if the mac address from tmp_add_list is not in the
8783 * uc/mc_mac_list, it means a TO_DEL request was received
8784 * during the time window of adding the mac address into the
8785 * mac table. if the mac_node state is ACTIVE, change it to
8786 * TO_DEL so it will be removed next time; otherwise it must
8787 * be TO_ADD, meaning this address has not been added into the
8788 * mac table yet, so just remove the mac node.
8790 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8792 hclge_update_mac_node(new_node, mac_node->state);
8793 list_del(&mac_node->node);
8795 } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8796 mac_node->state = HCLGE_MAC_TO_DEL;
8797 list_move_tail(&mac_node->node, mac_list);
8799 list_del(&mac_node->node);
8807 static void hclge_sync_from_del_list(struct list_head *del_list,
8808 struct list_head *mac_list)
8810 struct hclge_mac_node *mac_node, *tmp, *new_node;
8812 list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8813 new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8815 /* If the mac addr exists in the mac list, it means a
8816 * new TO_ADD request was received during the time window
8817 * of configuring the mac address. Since the mac node
8818 * state is TO_ADD and the address is still in the
8819 * hardware (because the delete failed), we just need
8820 * to change the mac node state to ACTIVE.
8822 new_node->state = HCLGE_MAC_ACTIVE;
8823 list_del(&mac_node->node);
8826 list_move_tail(&mac_node->node, mac_list);
8831 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8832 enum HCLGE_MAC_ADDR_TYPE mac_type,
8835 if (mac_type == HCLGE_MAC_ADDR_UC) {
8837 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8838 else if (hclge_is_umv_space_full(vport, true))
8839 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8842 vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8844 vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8848 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8849 enum HCLGE_MAC_ADDR_TYPE mac_type)
8851 struct hclge_mac_node *mac_node, *tmp, *new_node;
8852 struct list_head tmp_add_list, tmp_del_list;
8853 struct list_head *list;
8856 INIT_LIST_HEAD(&tmp_add_list);
8857 INIT_LIST_HEAD(&tmp_del_list);
8859 /* move the mac addrs to tmp_add_list and tmp_del_list, so
8860 * we can add/delete them outside the spin lock
8862 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8863 &vport->uc_mac_list : &vport->mc_mac_list;
8865 spin_lock_bh(&vport->mac_list_lock);
8867 list_for_each_entry_safe(mac_node, tmp, list, node) {
8868 switch (mac_node->state) {
8869 case HCLGE_MAC_TO_DEL:
8870 list_move_tail(&mac_node->node, &tmp_del_list);
8872 case HCLGE_MAC_TO_ADD:
8873 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8876 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8877 new_node->state = mac_node->state;
8878 list_add_tail(&new_node->node, &tmp_add_list);
8886 spin_unlock_bh(&vport->mac_list_lock);
8888 /* delete first, in order to get max mac table space for adding */
8889 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
8890 hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
8892 /* if some mac addresses failed to be added/deleted, move them
8893 * back to the mac_list and retry next time.
8895 spin_lock_bh(&vport->mac_list_lock);
8897 hclge_sync_from_del_list(&tmp_del_list, list);
8898 all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8900 spin_unlock_bh(&vport->mac_list_lock);
8902 hclge_update_overflow_flags(vport, mac_type, all_added);
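/* The sync above is a two-phase scheme, roughly:
 *
 *   spin_lock_bh(&vport->mac_list_lock);
 *   ... snapshot TO_DEL/TO_ADD nodes into tmp_del_list/tmp_add_list ...
 *   spin_unlock_bh(&vport->mac_list_lock);
 *
 *   ... issue the firmware commands without the lock held ...
 *
 *   spin_lock_bh(&vport->mac_list_lock);
 *   ... merge results back, re-queueing anything that failed ...
 *   spin_unlock_bh(&vport->mac_list_lock);
 *
 * Deletes are flushed first so adds see the maximum free table space.
 */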
8905 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8907 struct hclge_dev *hdev = vport->back;
8909 if (test_bit(vport->vport_id, hdev->vport_config_block))
8912 if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8918 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8922 for (i = 0; i < hdev->num_alloc_vport; i++) {
8923 struct hclge_vport *vport = &hdev->vport[i];
8925 if (!hclge_need_sync_mac_table(vport))
8928 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8929 hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8933 static void hclge_build_del_list(struct list_head *list,
8935 struct list_head *tmp_del_list)
8937 struct hclge_mac_node *mac_cfg, *tmp;
8939 list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8940 switch (mac_cfg->state) {
8941 case HCLGE_MAC_TO_DEL:
8942 case HCLGE_MAC_ACTIVE:
8943 list_move_tail(&mac_cfg->node, tmp_del_list);
8945 case HCLGE_MAC_TO_ADD:
8947 list_del(&mac_cfg->node);
8955 static void hclge_unsync_del_list(struct hclge_vport *vport,
8956 int (*unsync)(struct hclge_vport *vport,
8957 const unsigned char *addr),
8959 struct list_head *tmp_del_list)
8961 struct hclge_mac_node *mac_cfg, *tmp;
8964 list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8965 ret = unsync(vport, mac_cfg->mac_addr);
8966 if (!ret || ret == -ENOENT) {
8967 /* clear all mac addrs from hardware, but keep these
8968 * mac addrs in the mac list so they can be restored
8969 * after the vf reset finishes.
8972 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8973 mac_cfg->state = HCLGE_MAC_TO_ADD;
8975 list_del(&mac_cfg->node);
8978 } else if (is_del_list) {
8979 mac_cfg->state = HCLGE_MAC_TO_DEL;
8984 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8985 enum HCLGE_MAC_ADDR_TYPE mac_type)
8987 int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8988 struct hclge_dev *hdev = vport->back;
8989 struct list_head tmp_del_list, *list;
8991 if (mac_type == HCLGE_MAC_ADDR_UC) {
8992 list = &vport->uc_mac_list;
8993 unsync = hclge_rm_uc_addr_common;
8995 list = &vport->mc_mac_list;
8996 unsync = hclge_rm_mc_addr_common;
8999 INIT_LIST_HEAD(&tmp_del_list);
9002 set_bit(vport->vport_id, hdev->vport_config_block);
9004 spin_lock_bh(&vport->mac_list_lock);
9006 hclge_build_del_list(list, is_del_list, &tmp_del_list);
9008 spin_unlock_bh(&vport->mac_list_lock);
9010 hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
9012 spin_lock_bh(&vport->mac_list_lock);
9014 hclge_sync_from_del_list(&tmp_del_list, list);
9016 spin_unlock_bh(&vport->mac_list_lock);
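/* A usage note on is_del_list above: callers tearing a vport down pass
 * true (nodes are dropped outright), while the VF-reset path passes false
 * so ACTIVE entries are only demoted to TO_ADD and can be re-programmed
 * once the reset completes, e.g.
 *
 *   hclge_rm_vport_all_mac_table(vport, false, HCLGE_MAC_ADDR_UC);
 */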
9019 /* remove all mac addresses when uninitializing */
9020 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
9021 enum HCLGE_MAC_ADDR_TYPE mac_type)
9023 struct hclge_mac_node *mac_node, *tmp;
9024 struct hclge_dev *hdev = vport->back;
9025 struct list_head tmp_del_list, *list;
9027 INIT_LIST_HEAD(&tmp_del_list);
9029 list = (mac_type == HCLGE_MAC_ADDR_UC) ?
9030 &vport->uc_mac_list : &vport->mc_mac_list;
9032 spin_lock_bh(&vport->mac_list_lock);
9034 list_for_each_entry_safe(mac_node, tmp, list, node) {
9035 switch (mac_node->state) {
9036 case HCLGE_MAC_TO_DEL:
9037 case HCLGE_MAC_ACTIVE:
9038 list_move_tail(&mac_node->node, &tmp_del_list);
9040 case HCLGE_MAC_TO_ADD:
9041 list_del(&mac_node->node);
9047 spin_unlock_bh(&vport->mac_list_lock);
9049 hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
9051 if (!list_empty(&tmp_del_list))
9052 dev_warn(&hdev->pdev->dev,
9053 "uninit %s mac list for vport %u not completely.\n",
9054 mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
9057 list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
9058 list_del(&mac_node->node);
9063 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
9065 struct hclge_vport *vport;
9068 for (i = 0; i < hdev->num_alloc_vport; i++) {
9069 vport = &hdev->vport[i];
9070 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
9071 hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
9075 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
9076 u16 cmdq_resp, u8 resp_code)
9078 #define HCLGE_ETHERTYPE_SUCCESS_ADD 0
9079 #define HCLGE_ETHERTYPE_ALREADY_ADD 1
9080 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
9081 #define HCLGE_ETHERTYPE_KEY_CONFLICT 3
9086 dev_err(&hdev->pdev->dev,
9087 "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
9092 switch (resp_code) {
9093 case HCLGE_ETHERTYPE_SUCCESS_ADD:
9094 case HCLGE_ETHERTYPE_ALREADY_ADD:
9097 case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
9098 dev_err(&hdev->pdev->dev,
9099 "add mac ethertype failed for manager table overflow.\n");
9100 return_status = -EIO;
9102 case HCLGE_ETHERTYPE_KEY_CONFLICT:
9103 dev_err(&hdev->pdev->dev,
9104 "add mac ethertype failed for key conflict.\n");
9105 return_status = -EIO;
9108 dev_err(&hdev->pdev->dev,
9109 "add mac ethertype failed for undefined, code=%u.\n",
9111 return_status = -EIO;
9114 return return_status;
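/* Decoding sketch: the firmware folds the manager-table result into one
 * byte, so a caller sees, for example:
 *
 *   resp_code == HCLGE_ETHERTYPE_ALREADY_ADD      -> 0 (treated as success)
 *   resp_code == HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW -> -EIO
 *
 * Only a zero cmdq_resp gets this far; any other firmware status is
 * reported as a cmdq failure first.
 */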
9117 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
9120 struct hclge_vport *vport = hclge_get_vport(handle);
9121 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9122 struct hclge_dev *hdev = vport->back;
9124 vport = hclge_get_vf_vport(hdev, vf);
9128 hnae3_format_mac_addr(format_mac_addr, mac_addr);
9129 if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
9130 dev_info(&hdev->pdev->dev,
9131 "Specified MAC(=%s) is same as before, no change committed!\n",
9136 ether_addr_copy(vport->vf_info.mac, mac_addr);
9138 /* there is a time window before the PF knows the VF is not
9139 * alive, which may cause the mailbox send to fail; that doesn't
9140 * matter, as the VF will query it when it reinitializes.
9142 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
9143 dev_info(&hdev->pdev->dev,
9144 "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
9145 vf, format_mac_addr);
9146 (void)hclge_inform_reset_assert_to_vf(vport);
9150 dev_info(&hdev->pdev->dev,
9151 "MAC of VF %d has been set to %s, will be active after VF reset\n",
9152 vf, format_mac_addr);
9156 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9157 const struct hclge_mac_mgr_tbl_entry_cmd *req)
9159 struct hclge_desc desc;
9164 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9165 memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9167 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9169 dev_err(&hdev->pdev->dev,
9170 "add mac ethertype failed for cmd_send, ret =%d.\n",
9175 resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9176 retval = le16_to_cpu(desc.retval);
9178 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9181 static int init_mgr_tbl(struct hclge_dev *hdev)
9186 for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9187 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9189 dev_err(&hdev->pdev->dev,
9190 "add mac ethertype failed, ret =%d.\n",
9199 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9201 struct hclge_vport *vport = hclge_get_vport(handle);
9202 struct hclge_dev *hdev = vport->back;
9204 ether_addr_copy(p, hdev->hw.mac.mac_addr);
9207 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9208 const u8 *old_addr, const u8 *new_addr)
9210 struct list_head *list = &vport->uc_mac_list;
9211 struct hclge_mac_node *old_node, *new_node;
9213 new_node = hclge_find_mac_node(list, new_addr);
9215 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9219 new_node->state = HCLGE_MAC_TO_ADD;
9220 ether_addr_copy(new_node->mac_addr, new_addr);
9221 list_add(&new_node->node, list);
9223 if (new_node->state == HCLGE_MAC_TO_DEL)
9224 new_node->state = HCLGE_MAC_ACTIVE;
9226 /* make sure the new addr is at the list head, to avoid the
9227 * dev addr not being re-added into the mac table due to the
9228 * umv space limitation after a global/imp reset, which clears
9229 * the mac table in hardware.
9231 list_move(&new_node->node, list);
9234 if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9235 old_node = hclge_find_mac_node(list, old_addr);
9237 if (old_node->state == HCLGE_MAC_TO_ADD) {
9238 list_del(&old_node->node);
9241 old_node->state = HCLGE_MAC_TO_DEL;
9246 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
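/* hclge_set_mac_addr() below leans on the helper above and orders its
 * steps carefully: the pause address is programmed first, the node list
 * is updated under mac_list_lock, and on failure the old pause address is
 * restored, so the hardware and software views cannot drift apart.
 */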
9251 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9254 const unsigned char *new_addr = (const unsigned char *)p;
9255 struct hclge_vport *vport = hclge_get_vport(handle);
9256 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9257 struct hclge_dev *hdev = vport->back;
9258 unsigned char *old_addr = NULL;
9261 /* mac addr check */
9262 if (is_zero_ether_addr(new_addr) ||
9263 is_broadcast_ether_addr(new_addr) ||
9264 is_multicast_ether_addr(new_addr)) {
9265 hnae3_format_mac_addr(format_mac_addr, new_addr);
9266 dev_err(&hdev->pdev->dev,
9267 "change uc mac err! invalid mac: %s.\n",
9272 ret = hclge_pause_addr_cfg(hdev, new_addr);
9274 dev_err(&hdev->pdev->dev,
9275 "failed to configure mac pause address, ret = %d\n",
9281 old_addr = hdev->hw.mac.mac_addr;
9283 spin_lock_bh(&vport->mac_list_lock);
9284 ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9286 hnae3_format_mac_addr(format_mac_addr, new_addr);
9287 dev_err(&hdev->pdev->dev,
9288 "failed to change the mac addr:%s, ret = %d\n",
9289 format_mac_addr, ret);
9290 spin_unlock_bh(&vport->mac_list_lock);
9293 hclge_pause_addr_cfg(hdev, old_addr);
9297 /* we must update the dev addr under spin lock protection, to prevent
9298 * the dev addr from being removed by the set_rx_mode path.
9300 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9301 spin_unlock_bh(&vport->mac_list_lock);
9303 hclge_task_schedule(hdev, 0);
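/* hclge_mii_ioctl() services the MII ioctls when the PHY is driven by IMP
 * firmware rather than by phylib. The elided case labels are presumably
 * the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG triple, matching the
 * phy_id read, register read and register write bodies that remain.
 */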
9308 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9310 struct mii_ioctl_data *data = if_mii(ifr);
9312 if (!hnae3_dev_phy_imp_supported(hdev))
9317 data->phy_id = hdev->hw.mac.phy_addr;
9318 /* this command reads phy id and register at the same time */
9321 data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9325 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9331 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9334 struct hclge_vport *vport = hclge_get_vport(handle);
9335 struct hclge_dev *hdev = vport->back;
9339 return hclge_ptp_get_cfg(hdev, ifr);
9341 return hclge_ptp_set_cfg(hdev, ifr);
9343 if (!hdev->hw.mac.phydev)
9344 return hclge_mii_ioctl(hdev, ifr, cmd);
9347 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9350 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9353 struct hclge_port_vlan_filter_bypass_cmd *req;
9354 struct hclge_desc desc;
9357 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9358 req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9360 hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9363 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9365 dev_err(&hdev->pdev->dev,
9366 "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9372 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9373 u8 fe_type, bool filter_en, u8 vf_id)
9375 struct hclge_vlan_filter_ctrl_cmd *req;
9376 struct hclge_desc desc;
9379 /* read current vlan filter parameter */
9380 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9381 req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9382 req->vlan_type = vlan_type;
9385 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9387 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9392 /* modify and write new config parameter */
9393 hclge_comm_cmd_reuse_desc(&desc, false);
9394 req->vlan_fe = filter_en ?
9395 (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9397 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9399 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9405 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9407 struct hclge_dev *hdev = vport->back;
9408 struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9411 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9412 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9413 HCLGE_FILTER_FE_EGRESS_V1_B,
9414 enable, vport->vport_id);
9416 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9417 HCLGE_FILTER_FE_EGRESS, enable,
9422 if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9423 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9425 } else if (!vport->vport_id) {
9426 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9429 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9430 HCLGE_FILTER_FE_INGRESS,
9437 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9439 struct hnae3_handle *handle = &vport->nic;
9440 struct hclge_vport_vlan_cfg *vlan, *tmp;
9441 struct hclge_dev *hdev = vport->back;
9443 if (vport->vport_id) {
9444 if (vport->port_base_vlan_cfg.state !=
9445 HNAE3_PORT_BASE_VLAN_DISABLE)
9448 if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9450 } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9454 if (!vport->req_vlan_fltr_en)
9457 /* for compatibility with older devices, always enable the vlan filter */
9458 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9461 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9462 if (vlan->vlan_id != 0)
9468 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9470 struct hclge_dev *hdev = vport->back;
9474 mutex_lock(&hdev->vport_lock);
9476 vport->req_vlan_fltr_en = request_en;
9478 need_en = hclge_need_enable_vport_vlan_filter(vport);
9479 if (need_en == vport->cur_vlan_fltr_en) {
9480 mutex_unlock(&hdev->vport_lock);
9484 ret = hclge_set_vport_vlan_filter(vport, need_en);
9486 mutex_unlock(&hdev->vport_lock);
9490 vport->cur_vlan_fltr_en = need_en;
9492 mutex_unlock(&hdev->vport_lock);
9497 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9499 struct hclge_vport *vport = hclge_get_vport(handle);
9501 return hclge_enable_vport_vlan_filter(vport, enable);
9504 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9505 bool is_kill, u16 vlan,
9506 struct hclge_desc *desc)
9508 struct hclge_vlan_filter_vf_cfg_cmd *req0;
9509 struct hclge_vlan_filter_vf_cfg_cmd *req1;
9514 hclge_cmd_setup_basic_desc(&desc[0],
9515 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9516 hclge_cmd_setup_basic_desc(&desc[1],
9517 HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9519 desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
9521 vf_byte_off = vfid / 8;
9522 vf_byte_val = 1 << (vfid % 8);
9524 req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9525 req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9527 req0->vlan_id = cpu_to_le16(vlan);
9528 req0->vlan_cfg = is_kill;
9530 if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9531 req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9533 req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9535 ret = hclge_cmd_send(&hdev->hw, desc, 2);
9537 dev_err(&hdev->pdev->dev,
9538 "Send vf vlan command fail, ret =%d.\n",
9546 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9547 bool is_kill, struct hclge_desc *desc)
9549 struct hclge_vlan_filter_vf_cfg_cmd *req;
9551 req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9554 #define HCLGE_VF_VLAN_NO_ENTRY 2
9555 if (!req->resp_code || req->resp_code == 1)
9558 if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9559 set_bit(vfid, hdev->vf_vlan_full);
9560 dev_warn(&hdev->pdev->dev,
9561 "vf vlan table is full, vf vlan filter is disabled\n");
9565 dev_err(&hdev->pdev->dev,
9566 "Add vf vlan filter fail, ret =%u.\n",
9569 #define HCLGE_VF_VLAN_DEL_NO_FOUND 1
9570 if (!req->resp_code)
9573 /* the vf vlan filter is disabled when the vf vlan table is full,
9574 * so new vlan ids will not be added into the vf vlan table.
9575 * Just return 0 without a warning, to avoid massive verbose
9576 * logs at unload time.
9578 if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9581 dev_err(&hdev->pdev->dev,
9582 "Kill vf vlan filter fail, ret =%u.\n",
9589 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9590 bool is_kill, u16 vlan)
9592 struct hclge_vport *vport = &hdev->vport[vfid];
9593 struct hclge_desc desc[2];
9596 /* if the vf vlan table is full, firmware will close the vf vlan filter;
9597 * it is then neither possible nor necessary to add a new vlan id to it.
9598 * If spoof check is enabled and the vf vlan table is full, no new vlan
9599 * should be added, because tx packets with these vlan ids will be dropped.
9601 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9602 if (vport->vf_info.spoofchk && vlan) {
9603 dev_err(&hdev->pdev->dev,
9604 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9610 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9614 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
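/* Worked example of the VF bitmap math used in the command above: the
 * bitmap spans two descriptors, HCLGE_MAX_VF_BYTES bytes in the first.
 * For vfid 10:
 *
 *   vf_byte_off = 10 / 8 = 1;     vf_byte_val = 1 << (10 % 8) = 0x04;
 *
 * so byte 1 of req0->vf_bitmap gets 0x04, and only vfids past the first
 * descriptor's capacity spill into req1->vf_bitmap.
 */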
9617 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9618 u16 vlan_id, bool is_kill)
9620 struct hclge_vlan_filter_pf_cfg_cmd *req;
9621 struct hclge_desc desc;
9622 u8 vlan_offset_byte_val;
9623 u8 vlan_offset_byte;
9627 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9629 vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9630 vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9631 HCLGE_VLAN_BYTE_SIZE;
9632 vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9634 req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9635 req->vlan_offset = vlan_offset_160;
9636 req->vlan_cfg = is_kill;
9637 req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9641 dev_err(&hdev->pdev->dev,
9642 "port vlan command, send fail, ret =%d.\n", ret);
9646 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
9647 u16 vlan_id, bool is_kill)
9649 /* vlan 0 may be added twice when 8021q module is enabled */
9650 if (!is_kill && !vlan_id &&
9651 test_bit(vport_id, hdev->vlan_table[vlan_id]))
9654 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9655 dev_warn(&hdev->pdev->dev,
9656 "Add port vlan failed, vport %u is already in vlan %u\n",
9662 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9663 dev_warn(&hdev->pdev->dev,
9664 "Delete port vlan failed, vport %u is not in vlan %u\n",
9672 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9673 u16 vport_id, u16 vlan_id,
9676 u16 vport_idx, vport_num = 0;
9679 if (is_kill && !vlan_id)
9682 if (vlan_id >= VLAN_N_VID)
9685 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9687 dev_err(&hdev->pdev->dev,
9688 "Set %u vport vlan filter config fail, ret =%d.\n",
9693 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
9696 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9699 if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9700 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
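/* Sharing note for the two helpers above: the per-port vlan filter entry
 * is common to all vports, so it is only programmed when the first vport
 * joins a vlan and only cleared when the last one leaves, as tracked by
 * the per-vlan vport bitmap walked just above.
 */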
9706 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9708 struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9709 struct hclge_vport_vtag_tx_cfg_cmd *req;
9710 struct hclge_dev *hdev = vport->back;
9711 struct hclge_desc desc;
9715 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9717 req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9718 req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9719 req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9720 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9721 vcfg->accept_tag1 ? 1 : 0);
9722 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9723 vcfg->accept_untag1 ? 1 : 0);
9724 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9725 vcfg->accept_tag2 ? 1 : 0);
9726 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9727 vcfg->accept_untag2 ? 1 : 0);
9728 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9729 vcfg->insert_tag1_en ? 1 : 0);
9730 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9731 vcfg->insert_tag2_en ? 1 : 0);
9732 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9733 vcfg->tag_shift_mode_en ? 1 : 0);
9734 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9736 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9737 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9738 HCLGE_VF_NUM_PER_BYTE;
9739 req->vf_bitmap[bmap_index] =
9740 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9742 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9744 dev_err(&hdev->pdev->dev,
9745 "Send port txvlan cfg command fail, ret =%d\n",
9751 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9753 struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9754 struct hclge_vport_vtag_rx_cfg_cmd *req;
9755 struct hclge_dev *hdev = vport->back;
9756 struct hclge_desc desc;
9760 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9762 req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9763 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9764 vcfg->strip_tag1_en ? 1 : 0);
9765 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9766 vcfg->strip_tag2_en ? 1 : 0);
9767 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9768 vcfg->vlan1_vlan_prionly ? 1 : 0);
9769 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9770 vcfg->vlan2_vlan_prionly ? 1 : 0);
9771 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9772 vcfg->strip_tag1_discard_en ? 1 : 0);
9773 hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9774 vcfg->strip_tag2_discard_en ? 1 : 0);
9776 req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9777 bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9778 HCLGE_VF_NUM_PER_BYTE;
9779 req->vf_bitmap[bmap_index] =
9780 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9782 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9784 dev_err(&hdev->pdev->dev,
9785 "Send port rxvlan cfg command fail, ret =%d\n",
9791 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9792 u16 port_base_vlan_state,
9793 u16 vlan_tag, u8 qos)
9797 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9798 vport->txvlan_cfg.accept_tag1 = true;
9799 vport->txvlan_cfg.insert_tag1_en = false;
9800 vport->txvlan_cfg.default_tag1 = 0;
9802 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9804 vport->txvlan_cfg.accept_tag1 =
9805 ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9806 vport->txvlan_cfg.insert_tag1_en = true;
9807 vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9811 vport->txvlan_cfg.accept_untag1 = true;
9813 /* accept_tag2 and accept_untag2 are not supported on
9814 * pdev revision 0x20; newer revisions support them, but
9815 * these two fields cannot be configured by the user.
9817 vport->txvlan_cfg.accept_tag2 = true;
9818 vport->txvlan_cfg.accept_untag2 = true;
9819 vport->txvlan_cfg.insert_tag2_en = false;
9820 vport->txvlan_cfg.default_tag2 = 0;
9821 vport->txvlan_cfg.tag_shift_mode_en = true;
9823 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9824 vport->rxvlan_cfg.strip_tag1_en = false;
9825 vport->rxvlan_cfg.strip_tag2_en =
9826 vport->rxvlan_cfg.rx_vlan_offload_en;
9827 vport->rxvlan_cfg.strip_tag2_discard_en = false;
9829 vport->rxvlan_cfg.strip_tag1_en =
9830 vport->rxvlan_cfg.rx_vlan_offload_en;
9831 vport->rxvlan_cfg.strip_tag2_en = true;
9832 vport->rxvlan_cfg.strip_tag2_discard_en = true;
9835 vport->rxvlan_cfg.strip_tag1_discard_en = false;
9836 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9837 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9839 ret = hclge_set_vlan_tx_offload_cfg(vport);
9843 return hclge_set_vlan_rx_offload_cfg(vport);
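/* Summary of the split configured above: with a port base vlan enabled,
 * hardware inserts the tag on tx (default_tag1 carries the qos in the
 * PCP bits above the vlan id), and the rx strip/discard bits are flipped
 * so the port vlan never reaches the stack; with it disabled, only plain
 * rx vlan stripping (rx_vlan_offload_en) is in effect.
 */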
9846 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9848 struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9849 struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9850 struct hclge_desc desc;
9853 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9854 rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9855 rx_req->ot_fst_vlan_type =
9856 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9857 rx_req->ot_sec_vlan_type =
9858 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9859 rx_req->in_fst_vlan_type =
9860 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9861 rx_req->in_sec_vlan_type =
9862 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9864 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9866 dev_err(&hdev->pdev->dev,
9867 "Send rxvlan protocol type command fail, ret =%d\n",
9872 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9874 tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9875 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9876 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9878 status = hclge_cmd_send(&hdev->hw, &desc, 1);
9880 dev_err(&hdev->pdev->dev,
9881 "Send txvlan protocol type command fail, ret =%d\n",
9887 static int hclge_init_vlan_filter(struct hclge_dev *hdev)
9889 struct hclge_vport *vport;
9893 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9894 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9895 HCLGE_FILTER_FE_EGRESS_V1_B,
9898 /* for revision 0x21, vf vlan filter is per function */
9899 for (i = 0; i < hdev->num_alloc_vport; i++) {
9900 vport = &hdev->vport[i];
9901 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9902 HCLGE_FILTER_FE_EGRESS, true,
9906 vport->cur_vlan_fltr_en = true;
9909 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9910 HCLGE_FILTER_FE_INGRESS, true, 0);
9913 static int hclge_init_vlan_type(struct hclge_dev *hdev)
9915 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
9916 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
9917 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
9918 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
9919 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
9920 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
9922 return hclge_set_vlan_protocol_type(hdev);
9925 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
9927 struct hclge_port_base_vlan_config *cfg;
9928 struct hclge_vport *vport;
9932 for (i = 0; i < hdev->num_alloc_vport; i++) {
9933 vport = &hdev->vport[i];
9934 cfg = &vport->port_base_vlan_cfg;
9936 ret = hclge_vlan_offload_cfg(vport, cfg->state,
9937 cfg->vlan_info.vlan_tag,
9938 cfg->vlan_info.qos);
9945 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9947 struct hnae3_handle *handle = &hdev->vport[0].nic;
9950 ret = hclge_init_vlan_filter(hdev);
9954 ret = hclge_init_vlan_type(hdev);
9958 ret = hclge_init_vport_vlan_offload(hdev);
9962 return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9965 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9968 struct hclge_vport_vlan_cfg *vlan, *tmp;
9969 struct hclge_dev *hdev = vport->back;
9971 mutex_lock(&hdev->vport_lock);
9973 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9974 if (vlan->vlan_id == vlan_id) {
9975 mutex_unlock(&hdev->vport_lock);
9980 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9982 mutex_unlock(&hdev->vport_lock);
9986 vlan->hd_tbl_status = writen_to_tbl;
9987 vlan->vlan_id = vlan_id;
9989 list_add_tail(&vlan->node, &vport->vlan_list);
9990 mutex_unlock(&hdev->vport_lock);
9993 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9995 struct hclge_vport_vlan_cfg *vlan, *tmp;
9996 struct hclge_dev *hdev = vport->back;
9999 mutex_lock(&hdev->vport_lock);
10001 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10002 if (!vlan->hd_tbl_status) {
10003 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10005 vlan->vlan_id, false);
10007 dev_err(&hdev->pdev->dev,
10008 "restore vport vlan list failed, ret=%d\n",
10011 mutex_unlock(&hdev->vport_lock);
10015 vlan->hd_tbl_status = true;
10018 mutex_unlock(&hdev->vport_lock);
10023 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
10026 struct hclge_vport_vlan_cfg *vlan, *tmp;
10027 struct hclge_dev *hdev = vport->back;
10029 mutex_lock(&hdev->vport_lock);
10031 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10032 if (vlan->vlan_id == vlan_id) {
10033 if (is_write_tbl && vlan->hd_tbl_status)
10034 hclge_set_vlan_filter_hw(hdev,
10035 htons(ETH_P_8021Q),
10040 list_del(&vlan->node);
10046 mutex_unlock(&hdev->vport_lock);
10049 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
10051 struct hclge_vport_vlan_cfg *vlan, *tmp;
10052 struct hclge_dev *hdev = vport->back;
10054 mutex_lock(&hdev->vport_lock);
10056 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10057 if (vlan->hd_tbl_status)
10058 hclge_set_vlan_filter_hw(hdev,
10059 htons(ETH_P_8021Q),
10064 vlan->hd_tbl_status = false;
10066 list_del(&vlan->node);
10070 clear_bit(vport->vport_id, hdev->vf_vlan_full);
10071 mutex_unlock(&hdev->vport_lock);
10074 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
10076 struct hclge_vport_vlan_cfg *vlan, *tmp;
10077 struct hclge_vport *vport;
10080 mutex_lock(&hdev->vport_lock);
10082 for (i = 0; i < hdev->num_alloc_vport; i++) {
10083 vport = &hdev->vport[i];
10084 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10085 list_del(&vlan->node);
10090 mutex_unlock(&hdev->vport_lock);
10093 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
10095 struct hclge_vlan_info *vlan_info;
10096 struct hclge_vport *vport;
10103 /* the PF should restore the port base vlan of all VFs */
10104 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
10105 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
10106 vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
10107 &vport->port_base_vlan_cfg.vlan_info :
10108 &vport->port_base_vlan_cfg.old_vlan_info;
10110 vlan_id = vlan_info->vlan_tag;
10111 vlan_proto = vlan_info->vlan_proto;
10112 state = vport->port_base_vlan_cfg.state;
10114 if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
10115 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
10116 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
10119 vport->port_base_vlan_cfg.tbl_sta = ret == 0;
10124 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
10126 struct hclge_vport_vlan_cfg *vlan, *tmp;
10127 struct hclge_dev *hdev = vport->back;
10130 mutex_lock(&hdev->vport_lock);
10132 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10133 list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
10134 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10136 vlan->vlan_id, false);
10139 vlan->hd_tbl_status = true;
10143 mutex_unlock(&hdev->vport_lock);
10146 /* For global reset and imp reset, hardware will clear the mac table,
10147 * so we change the mac address state from ACTIVE to TO_ADD; they can
10148 * then be restored in the service task after the reset completes.
10149 * Furthermore, mac addresses in the TO_DEL or DEL_FAIL state need not
10150 * be restored after reset, so just remove these mac nodes from mac_list.
10152 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10154 struct hclge_mac_node *mac_node, *tmp;
10156 list_for_each_entry_safe(mac_node, tmp, list, node) {
10157 if (mac_node->state == HCLGE_MAC_ACTIVE) {
10158 mac_node->state = HCLGE_MAC_TO_ADD;
10159 } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10160 list_del(&mac_node->node);
10166 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10168 spin_lock_bh(&vport->mac_list_lock);
10170 hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10171 hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10172 set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10174 spin_unlock_bh(&vport->mac_list_lock);
10177 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10179 struct hclge_vport *vport = &hdev->vport[0];
10180 struct hnae3_handle *handle = &vport->nic;
10182 hclge_restore_mac_table_common(vport);
10183 hclge_restore_vport_port_base_vlan_config(hdev);
10184 hclge_restore_vport_vlan_table(vport);
10185 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10186 hclge_restore_fd_entries(handle);
10189 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10191 struct hclge_vport *vport = hclge_get_vport(handle);
10193 if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10194 vport->rxvlan_cfg.strip_tag1_en = false;
10195 vport->rxvlan_cfg.strip_tag2_en = enable;
10196 vport->rxvlan_cfg.strip_tag2_discard_en = false;
10198 vport->rxvlan_cfg.strip_tag1_en = enable;
10199 vport->rxvlan_cfg.strip_tag2_en = true;
10200 vport->rxvlan_cfg.strip_tag2_discard_en = true;
10203 vport->rxvlan_cfg.strip_tag1_discard_en = false;
10204 vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10205 vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10206 vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10208 return hclge_set_vlan_rx_offload_cfg(vport);
10211 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10213 struct hclge_dev *hdev = vport->back;
10215 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10216 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10219 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10220 u16 port_base_vlan_state,
10221 struct hclge_vlan_info *new_info,
10222 struct hclge_vlan_info *old_info)
10224 struct hclge_dev *hdev = vport->back;
10227 if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10228 hclge_rm_vport_all_vlan_table(vport, false);
10229 /* force clear VLAN 0 */
10230 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10233 return hclge_set_vlan_filter_hw(hdev,
10234 htons(new_info->vlan_proto),
10236 new_info->vlan_tag,
10240 vport->port_base_vlan_cfg.tbl_sta = false;
10242 /* force add VLAN 0 */
10243 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10247 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10248 vport->vport_id, old_info->vlan_tag,
10253 return hclge_add_vport_all_vlan_table(vport);
10256 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10257 const struct hclge_vlan_info *old_cfg)
10259 if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10262 if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10268 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
10269 struct hclge_vlan_info *new_info,
10270 struct hclge_vlan_info *old_info)
10272 struct hclge_dev *hdev = vport->back;
10275 /* add new VLAN tag */
10276 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
10277 vport->vport_id, new_info->vlan_tag,
10282 vport->port_base_vlan_cfg.tbl_sta = false;
10283 /* remove old VLAN tag */
10284 if (old_info->vlan_tag == 0)
10285 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10288 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10290 old_info->vlan_tag, true);
10292 dev_err(&hdev->pdev->dev,
10293 "failed to clear vport%u port base vlan %u, ret = %d.\n",
10294 vport->vport_id, old_info->vlan_tag, ret);
10299 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10300 struct hclge_vlan_info *vlan_info)
10302 struct hnae3_handle *nic = &vport->nic;
10303 struct hclge_vlan_info *old_vlan_info;
10306 old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10308 ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10313 if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10316 if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
10317 ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
10320 ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10326 vport->port_base_vlan_cfg.state = state;
10327 if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10328 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10330 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10332 vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
10333 vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10334 vport->port_base_vlan_cfg.tbl_sta = true;
10335 hclge_set_vport_vlan_fltr_change(vport);
10340 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10341 enum hnae3_port_base_vlan_state state,
10344 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10346 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10348 return HNAE3_PORT_BASE_VLAN_ENABLE;
10352 return HNAE3_PORT_BASE_VLAN_DISABLE;
10354 if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10355 vport->port_base_vlan_cfg.vlan_info.qos == qos)
10356 return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10358 return HNAE3_PORT_BASE_VLAN_MODIFY;
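/* Decision table for the helper above (the elided guards are presumably
 * the "no vlan and no qos requested" checks):
 *
 *   current DISABLE, nothing requested  -> NOCHANGE
 *   current DISABLE, vlan/qos requested -> ENABLE
 *   current ENABLE,  nothing requested  -> DISABLE
 *   same tag and qos as configured      -> NOCHANGE
 *   otherwise                           -> MODIFY
 */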
10361 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10362 u16 vlan, u8 qos, __be16 proto)
10364 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10365 struct hclge_vport *vport = hclge_get_vport(handle);
10366 struct hclge_dev *hdev = vport->back;
10367 struct hclge_vlan_info vlan_info;
10371 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10372 return -EOPNOTSUPP;
10374 vport = hclge_get_vf_vport(hdev, vfid);
10379 /* qos is a 3-bit value, so it cannot be bigger than 7 */
10379 if (vlan > VLAN_N_VID - 1 || qos > 7)
10381 if (proto != htons(ETH_P_8021Q))
10382 return -EPROTONOSUPPORT;
10384 state = hclge_get_port_base_vlan_state(vport,
10385 vport->port_base_vlan_cfg.state,
10387 if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10390 vlan_info.vlan_tag = vlan;
10391 vlan_info.qos = qos;
10392 vlan_info.vlan_proto = ntohs(proto);
10394 ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10396 dev_err(&hdev->pdev->dev,
10397 "failed to update port base vlan for vf %d, ret = %d\n",
10402 /* there is a time window before the PF knows the VF is not
10403 * alive, which may cause the mailbox send to fail; that doesn't
10404 * matter, as the VF will query it when it reinitializes.
10405 * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
10408 if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
10409 if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10410 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10415 set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
10416 &vport->need_notify);
10421 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10423 struct hclge_vlan_info *vlan_info;
10424 struct hclge_vport *vport;
10428 /* clear the port base vlan for all VFs */
10429 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10430 vport = &hdev->vport[vf];
10431 vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10433 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10435 vlan_info->vlan_tag, true);
10437 dev_err(&hdev->pdev->dev,
10438 "failed to clear vf vlan for vf%d, ret = %d\n",
10439 vf - HCLGE_VF_VPORT_START_NUM, ret);
10443 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10444 u16 vlan_id, bool is_kill)
10446 struct hclge_vport *vport = hclge_get_vport(handle);
10447 struct hclge_dev *hdev = vport->back;
10448 bool writen_to_tbl = false;
10451 /* When the device is resetting or reset has failed, firmware cannot
10452 * handle the mailbox. Just record the vlan id, and remove it after
10453 * the reset finishes.
10454 */
10455 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10456 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10457 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10461 /* when port base vlan is enabled, we use the port base vlan as the
10462 * vlan filter entry. In this case, we don't update the vlan filter
10463 * table when the user adds or removes a vlan; we just update the
10464 * vport vlan list. The vlan ids in the list are not written to the
10465 * vlan filter table until port base vlan is disabled.
10467 if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10468 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10470 writen_to_tbl = true;
10475 hclge_add_vport_vlan_table(vport, vlan_id,
10477 else if (is_kill && vlan_id != 0)
10478 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10479 } else if (is_kill) {
10480 /* when removing the hw vlan filter failed, record the vlan id,
10481 * and try to remove it from hw later, to stay consistent
10482 * with the stack.
10483 */
10484 set_bit(vlan_id, vport->vlan_del_fail_bmap);
10487 hclge_set_vport_vlan_fltr_change(vport);
10492 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10494 struct hclge_vport *vport;
10498 for (i = 0; i < hdev->num_alloc_vport; i++) {
10499 vport = &hdev->vport[i];
10500 if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10504 ret = hclge_enable_vport_vlan_filter(vport,
10505 vport->req_vlan_fltr_en);
10507 dev_err(&hdev->pdev->dev,
10508 "failed to sync vlan filter state for vport%u, ret = %d\n",
10509 vport->vport_id, ret);
10510 set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10517 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10519 #define HCLGE_MAX_SYNC_COUNT 60
10521 int i, ret, sync_cnt = 0;
10524 /* start from vport 1 for PF is always alive */
10525 for (i = 0; i < hdev->num_alloc_vport; i++) {
10526 struct hclge_vport *vport = &hdev->vport[i];
10528 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10530 while (vlan_id != VLAN_N_VID) {
10531 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10532 vport->vport_id, vlan_id,
10534 if (ret && ret != -EINVAL)
10537 clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10538 hclge_rm_vport_vlan_table(vport, vlan_id, false);
10539 hclge_set_vport_vlan_fltr_change(vport);
10542 if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10545 vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10550 hclge_sync_vlan_fltr_state(hdev);
10553 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10555 struct hclge_config_max_frm_size_cmd *req;
10556 struct hclge_desc desc;
10558 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10560 req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10561 req->max_frm_size = cpu_to_le16(new_mps);
10562 req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10564 return hclge_cmd_send(&hdev->hw, &desc, 1);
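/* MTU to frame-size arithmetic used below: assuming the usual kernel
 * values ETH_HLEN == 14, ETH_FCS_LEN == 4 and VLAN_HLEN == 4, the double
 * vlan allowance gives
 *
 *   max_frm_size = new_mtu + 14 + 4 + 2 * 4 = new_mtu + 26;
 *
 * so the default 1500-byte MTU maps to a 1526-byte max frame.
 */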
10567 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10569 struct hclge_vport *vport = hclge_get_vport(handle);
10571 return hclge_set_vport_mtu(vport, new_mtu);
10574 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10576 struct hclge_dev *hdev = vport->back;
10577 int i, max_frm_size, ret;
10579 /* HW supports 2 layers of vlan */
10580 max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10581 if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10582 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10585 max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10586 mutex_lock(&hdev->vport_lock);
10587 /* VF's mps must fit within hdev->mps */
10588 if (vport->vport_id && max_frm_size > hdev->mps) {
10589 mutex_unlock(&hdev->vport_lock);
10591 } else if (vport->vport_id) {
10592 vport->mps = max_frm_size;
10593 mutex_unlock(&hdev->vport_lock);
10597 /* the PF's mps must not be less than any VF's mps */
10598 for (i = 1; i < hdev->num_alloc_vport; i++)
10599 if (max_frm_size < hdev->vport[i].mps) {
10600 dev_err(&hdev->pdev->dev,
10601 "failed to set pf mtu for less than vport %d, mps = %u.\n",
10602 i, hdev->vport[i].mps);
10603 mutex_unlock(&hdev->vport_lock);
10607 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10609 ret = hclge_set_mac_mtu(hdev, max_frm_size);
10611 dev_err(&hdev->pdev->dev,
10612 "Change mtu fail, ret =%d\n", ret);
10616 hdev->mps = max_frm_size;
10617 vport->mps = max_frm_size;
10619 ret = hclge_buffer_alloc(hdev);
10621 dev_err(&hdev->pdev->dev,
10622 "Allocate buffer fail, ret =%d\n", ret);
10625 hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10626 mutex_unlock(&hdev->vport_lock);
10630 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10633 struct hclge_reset_tqp_queue_cmd *req;
10634 struct hclge_desc desc;
10637 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10639 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10640 req->tqp_id = cpu_to_le16(queue_id);
10642 hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10644 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10646 dev_err(&hdev->pdev->dev,
10647 "Send tqp reset cmd error, status =%d\n", ret);
10654 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10657 struct hclge_reset_tqp_queue_cmd *req;
10658 struct hclge_desc desc;
10661 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10663 req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10664 req->tqp_id = cpu_to_le16(queue_id);
10666 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10668 dev_err(&hdev->pdev->dev,
10669 "Get reset status error, status =%d\n", ret);
10673 *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10678 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10680 struct hclge_comm_tqp *tqp;
10681 struct hnae3_queue *queue;
10683 queue = handle->kinfo.tqp[queue_id];
10684 tqp = container_of(queue, struct hclge_comm_tqp, q);
10689 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10691 struct hclge_vport *vport = hclge_get_vport(handle);
10692 struct hclge_dev *hdev = vport->back;
10693 u16 reset_try_times = 0;
10699 for (i = 0; i < handle->kinfo.num_tqps; i++) {
10700 queue_gid = hclge_covert_handle_qid_global(handle, i);
10701 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10703 dev_err(&hdev->pdev->dev,
10704 "failed to send reset tqp cmd, ret = %d\n",
10709 while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10710 ret = hclge_get_reset_status(hdev, queue_gid,
10718 /* Wait for tqp hw reset */
10719 usleep_range(1000, 1200);
10722 if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10723 dev_err(&hdev->pdev->dev,
10724 "wait for tqp hw reset timeout\n");
10728 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10730 dev_err(&hdev->pdev->dev,
10731 "failed to deassert soft reset, ret = %d\n",
10735 reset_try_times = 0;
10740 static int hclge_reset_rcb(struct hnae3_handle *handle)
10742 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10743 #define HCLGE_RESET_RCB_SUCCESS 1U
10745 struct hclge_vport *vport = hclge_get_vport(handle);
10746 struct hclge_dev *hdev = vport->back;
10747 struct hclge_reset_cmd *req;
10748 struct hclge_desc desc;
10753 queue_gid = hclge_covert_handle_qid_global(handle, 0);
10755 req = (struct hclge_reset_cmd *)desc.data;
10756 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10757 hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10758 req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10759 req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10761 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10763 dev_err(&hdev->pdev->dev,
10764 "failed to send rcb reset cmd, ret = %d\n", ret);
10768 return_status = req->fun_reset_rcb_return_status;
10769 if (return_status == HCLGE_RESET_RCB_SUCCESS)
10772 if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10773 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10778 /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10779 * again to reset all tqps
10781 return hclge_reset_tqp_cmd(handle);
10784 int hclge_reset_tqp(struct hnae3_handle *handle)
10786 struct hclge_vport *vport = hclge_get_vport(handle);
10787 struct hclge_dev *hdev = vport->back;
10790 /* only need to disable PF's tqp */
10791 if (!vport->vport_id) {
10792 ret = hclge_tqp_enable(handle, false);
10794 dev_err(&hdev->pdev->dev,
10795 "failed to disable tqp, ret = %d\n", ret);
10800 return hclge_reset_rcb(handle);
10803 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10805 struct hclge_vport *vport = hclge_get_vport(handle);
10806 struct hclge_dev *hdev = vport->back;
10808 return hdev->fw_version;
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;

	if (!phydev)
		return;

	phy_set_asym_pause(phydev, rx_en, tx_en);
}

static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
	int ret;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		return 0;

	ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"configure pauseparam error, ret = %d.\n", ret);

	return ret;
}

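/* Resolve the pause configuration from the local and link-partner
 * autoneg advertisements and program the MAC pause parameters
 * accordingly; half-duplex links get pause disabled in both directions.
 */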
int hclge_cfg_flowctrl(struct hclge_dev *hdev)
{
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u16 remote_advertising = 0;
	u16 local_advertising;
	u32 rx_pause, tx_pause;
	u8 flowctl;

	if (!phydev->link)
		return 0;

	if (!phydev->autoneg)
		return hclge_mac_pause_setup_hw(hdev);

	local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);

	if (phydev->pause)
		remote_advertising = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		remote_advertising |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(local_advertising,
					   remote_advertising);
	tx_pause = flowctl & FLOW_CTRL_TX;
	rx_pause = flowctl & FLOW_CTRL_RX;

	if (phydev->duplex == HCLGE_MAC_HALF) {
		tx_pause = 0;
		rx_pause = 0;
	}

	return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
}

static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
				 u32 *rx_en, u32 *tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u8 media_type = hdev->hw.mac.media_type;

	*auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
		    hclge_get_autoneg(handle) : 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		*rx_en = 0;
		*tx_en = 0;
		return;
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
		*rx_en = 1;
		*tx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
		*tx_en = 1;
		*rx_en = 0;
	} else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
		*rx_en = 1;
		*tx_en = 1;
	} else {
		*rx_en = 0;
		*tx_en = 0;
	}
}

static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
					 u32 rx_en, u32 tx_en)
{
	if (rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_FULL;
	else if (rx_en && !tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
	else if (!rx_en && tx_en)
		hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
	else
		hdev->fc_mode_last_time = HCLGE_FC_NONE;

	hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
}

static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
				u32 rx_en, u32 tx_en)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	u32 fc_autoneg;

	if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
		fc_autoneg = hclge_get_autoneg(handle);
		if (auto_neg != fc_autoneg) {
			dev_info(&hdev->pdev->dev,
				 "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
			return -EOPNOTSUPP;
		}
	}

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
		dev_info(&hdev->pdev->dev,
			 "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	hclge_set_flowctrl_adv(hdev, rx_en, tx_en);

	hclge_record_user_pauseparam(hdev, rx_en, tx_en);

	if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
		return hclge_cfg_pauseparam(hdev, rx_en, tx_en);

	if (phydev)
		return phy_start_aneg(phydev);

	return -EOPNOTSUPP;
}

static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
					  u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = hdev->hw.mac.autoneg;
	if (lane_num)
		*lane_num = hdev->hw.mac.lane_num;
}

static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
				 u8 *module_type)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	/* When the nic is down, the service task is not running and does not
	 * update the port information every second. Query the port information
	 * before returning the media type to ensure it is correct.
	 */
	hclge_update_port_info(hdev);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;

	if (module_type)
		*module_type = hdev->hw.mac.module_type;
}

static void hclge_get_mdix_mode(struct hnae3_handle *handle,
				u8 *tp_mdix_ctrl, u8 *tp_mdix)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct phy_device *phydev = hdev->hw.mac.phydev;
	int mdix_ctrl, mdix, is_resolved;
	unsigned int retval;

	if (!phydev) {
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		*tp_mdix = ETH_TP_MDI_INVALID;
		return;
	}

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);

	retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
	mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
				    HCLGE_PHY_MDIX_CTRL_S);

	retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
	mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
	is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);

	phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);

	switch (mdix_ctrl) {
	case 0x0:
		*tp_mdix_ctrl = ETH_TP_MDI;
		break;
	case 0x1:
		*tp_mdix_ctrl = ETH_TP_MDI_X;
		break;
	case 0x3:
		*tp_mdix_ctrl = ETH_TP_MDI_AUTO;
		break;
	default:
		*tp_mdix_ctrl = ETH_TP_MDI_INVALID;
		break;
	}

	if (!is_resolved)
		*tp_mdix = ETH_TP_MDI_INVALID;
	else if (mdix)
		*tp_mdix = ETH_TP_MDI_X;
	else
		*tp_mdix = ETH_TP_MDI;
}

static void hclge_info_show(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct device *dev = &hdev->pdev->dev;

	dev_info(dev, "PF info begin:\n");

	dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
	dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
	dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
	dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
	dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
	dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
	dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
	dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
	dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
	dev_info(dev, "This is %s PF\n",
		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
	dev_info(dev, "DCB %s\n",
		 handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
	dev_info(dev, "MQPRIO %s\n",
		 handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
	dev_info(dev, "Default tx spare buffer size: %u\n",
		 hdev->tx_spare_buf_size);

	dev_info(dev, "PF info end.\n");
}

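/* Bring up the NIC client instance and enable its hw error interrupts.
 * If a reset starts while the client is initializing, roll back and
 * return -EBUSY so the caller can retry after the reset completes.
 */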
static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
					  struct hclge_vport *vport)
{
	struct hnae3_client *client = vport->nic.client;
	struct hclge_dev *hdev = ae_dev->priv;
	int rst_cnt = hdev->rst_stats.reset_cnt;
	int ret;

	ret = client->ops->init_instance(&vport->nic);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_nic_err;
	}

	/* Enable nic hw error interrupts */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable hw error interrupts\n", ret);
		goto init_nic_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	if (netif_msg_drv(&hdev->vport->nic))
		hclge_info_show(hdev);

	return ret;

init_nic_err:
	clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	client->ops->uninit_instance(&vport->nic, 0);

	return ret;
}

static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
					   struct hclge_vport *vport)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hnae3_client *client;
	int rst_cnt;
	int ret;

	if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
	    !hdev->nic_client)
		return 0;

	client = hdev->roce_client;
	ret = hclge_init_roce_base_info(vport);
	if (ret)
		return ret;

	rst_cnt = hdev->rst_stats.reset_cnt;
	ret = client->ops->init_instance(&vport->roce);
	if (ret)
		return ret;

	set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
	    rst_cnt != hdev->rst_stats.reset_cnt) {
		ret = -EBUSY;
		goto init_roce_err;
	}

	/* Enable roce ras interrupts */
	ret = hclge_config_rocee_ras_interrupt(hdev, true);
	if (ret) {
		dev_err(&ae_dev->pdev->dev,
			"fail(%d) to enable roce ras interrupts\n", ret);
		goto init_roce_err;
	}

	hnae3_set_client_init_flag(client, ae_dev, 1);

	return 0;

init_roce_err:
	clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
	while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
		msleep(HCLGE_WAIT_RESET_DONE);

	hdev->roce_client->ops->uninit_instance(&vport->roce, 0);

	return ret;
}

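/* Dispatch client initialization by client type. A KNIC client also
 * kicks off RoCE client init, since the RoCE client can only start
 * once the NIC client has been registered.
 */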
static int hclge_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		vport->nic.client = client;
		ret = hclge_init_nic_client_instance(ae_dev, vport);
		if (ret)
			goto clear_nic;

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			vport->roce.client = client;
		}

		ret = hclge_init_roce_client_instance(ae_dev, vport);
		if (ret)
			goto clear_roce;

		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	vport->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	vport->roce.client = NULL;
	return ret;
}

static void hclge_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport = &hdev->vport[0];

	if (hdev->roce_client) {
		clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
		hdev->roce_client = NULL;
		vport->roce.client = NULL;
	}
	if (client->type == HNAE3_CLIENT_ROCE)
		return;
	if (hdev->nic_client && client->ops->uninit_instance) {
		clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
		while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
			msleep(HCLGE_WAIT_RESET_DONE);

		client->ops->uninit_instance(&vport->nic, 0);
		hdev->nic_client = NULL;
		vport->nic.client = NULL;
	}
}

static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw = &hdev->hw;

	/* for devices that do not have device memory, return directly */
	if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
		return 0;

	hw->hw.mem_base =
		devm_ioremap_wc(&pdev->dev,
				pci_resource_start(pdev, HCLGE_MEM_BAR),
				pci_resource_len(pdev, HCLGE_MEM_BAR));
	if (!hw->hw.mem_base) {
		dev_err(&pdev->dev, "failed to map device memory\n");
		return -EFAULT;
	}

	return 0;
}

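/* Enable the PCI device, set the DMA mask (preferring 64 bit and
 * falling back to 32 bit), request the regions, and map BAR2 for the
 * configuration registers plus the optional device memory BAR.
 */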
static int hclge_pci_init(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pdev->dev,
				"can't set consistent PCI DMA");
			goto err_disable_device;
		}
		dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
	}

	ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hw.io_base = pcim_iomap(pdev, 2, 0);
	if (!hw->hw.io_base) {
		dev_err(&pdev->dev, "Can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_release_regions;
	}

	ret = hclge_dev_mem_map(hdev);
	if (ret)
		goto err_unmap_io_base;

	hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);

	return 0;

err_unmap_io_base:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclge_pci_uninit(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	if (hdev->hw.hw.mem_base)
		devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);

	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_free_irq_vectors(pdev);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static void hclge_state_init(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}

static void hclge_state_uninit(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	set_bit(HCLGE_STATE_REMOVING, &hdev->state);

	if (hdev->reset_timer.function)
		del_timer_sync(&hdev->reset_timer);
	if (hdev->service_task.work.func)
		cancel_delayed_work_sync(&hdev->service_task);
}

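/* Prepare the device for a reset requested from outside the driver
 * (e.g. FLR): retry the prepare step a bounded number of times, then
 * mask the misc vector and block the command queue until
 * hclge_reset_done() rebuilds the device.
 */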
static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
					enum hnae3_reset_type rst_type)
{
#define HCLGE_RESET_RETRY_WAIT_MS	500
#define HCLGE_RESET_RETRY_CNT	5

	struct hclge_dev *hdev = ae_dev->priv;
	int retry_cnt = 0;
	int ret;

	while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
		down(&hdev->reset_sem);
		set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		hdev->reset_type = rst_type;
		ret = hclge_reset_prepare(hdev);
		if (!ret && !hdev->reset_pending)
			break;

		dev_err(&hdev->pdev->dev,
			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
			ret, hdev->reset_pending, retry_cnt);
		clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
		up(&hdev->reset_sem);
		msleep(HCLGE_RESET_RETRY_WAIT_MS);
	}

	/* disable misc vector before reset done */
	hclge_enable_vector(&hdev->misc_vector, false);
	set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		hdev->rst_stats.flr_rst_cnt++;
}

static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	int ret;

	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_reset_rebuild(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);

	hdev->reset_type = HNAE3_NONE_RESET;
	clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
	up(&hdev->reset_sem);
}

static void hclge_clear_resetting_state(struct hclge_dev *hdev)
{
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hclge_vport *vport = &hdev->vport[i];
		int ret;

		/* Send cmd to clear vport's FUNC_RST_ING */
		ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
		if (ret)
			dev_warn(&hdev->pdev->dev,
				 "clear vport(%u) rst failed %d!\n",
				 vport->vport_id, ret);
	}
}

static int hclge_clear_hw_resource(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	/* This new command is only supported by new firmware and will fail
	 * with older firmware. Error value -EOPNOTSUPP can only be returned
	 * by older firmware running this command, so to keep the code
	 * backward compatible we override this value and return success.
	 */
	if (ret && ret != -EOPNOTSUPP) {
		dev_err(&hdev->pdev->dev,
			"failed to clear hw resource, ret = %d\n", ret);
		return ret;
	}

	return 0;
}

static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
}

static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
{
	if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
		hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}

static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);

	return &vport->back->hw.mac.wol;
}

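/* Query the wake-on-lan modes the firmware supports; the result is
 * reported back to ethtool as the supported WAKE_* bitmap.
 */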
static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
					u32 *wol_supported)
{
	struct hclge_query_wol_supported_cmd *wol_supported_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
				   true);
	wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query wol supported, ret = %d\n", ret);
		return ret;
	}

	*wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);

	return 0;
}

static int hclge_set_wol_cfg(struct hclge_dev *hdev,
			     struct hclge_wol_info *wol_info)
{
	struct hclge_wol_cfg_cmd *wol_cfg_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
	wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
	wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
	wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
	memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to set wol config, ret = %d\n", ret);

	return ret;
}

static int hclge_update_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	return hclge_set_wol_cfg(hdev, wol_info);
}

static int hclge_init_wol(struct hclge_dev *hdev)
{
	struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
	int ret;

	if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
		return 0;

	memset(wol_info, 0, sizeof(struct hclge_wol_info));
	ret = hclge_get_wol_supported_mode(hdev,
					   &wol_info->wol_support_mode);
	if (ret) {
		wol_info->wol_support_mode = 0;
		return ret;
	}

	return hclge_update_wol(hdev);
}

static void hclge_get_wol(struct hnae3_handle *handle,
			  struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);

	wol->supported = wol_info->wol_support_mode;
	wol->wolopts = wol_info->wol_current_mode;
	if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
		memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
}

static int hclge_set_wol(struct hnae3_handle *handle,
			 struct ethtool_wolinfo *wol)
{
	struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
	struct hclge_vport *vport = hclge_get_vport(handle);
	u32 wol_mode;
	int ret;

	wol_mode = wol->wolopts;
	if (wol_mode & ~wol_info->wol_support_mode)
		return -EINVAL;

	wol_info->wol_current_mode = wol_mode;
	if (wol_mode & WAKE_MAGICSECURE) {
		memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
		wol_info->wol_sopass_size = SOPASS_MAX;
	} else {
		wol_info->wol_sopass_size = 0;
	}

	ret = hclge_set_wol_cfg(vport->back, wol_info);
	if (ret)
		wol_info->wol_current_mode = 0;

	return ret;
}

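/* Main PF initialization path: bring up PCI, the command queue and
 * firmware, then the MAC, VLAN, TM, RSS and flow director blocks, and
 * finally arm the reset timer and service task. Each failure unwinds
 * through the error labels in reverse order of initialization.
 */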
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclge_dev *hdev;
	int ret;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	hdev->reset_type = HNAE3_NONE_RESET;
	hdev->reset_level = HNAE3_FUNC_RESET;
	ae_dev->priv = hdev;

	/* HW supports 2 layers of VLAN */
	hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	mutex_init(&hdev->vport_lock);
	spin_lock_init(&hdev->fd_rule_lock);
	sema_init(&hdev->reset_sem, 1);

	ret = hclge_pci_init(hdev);
	if (ret)
		goto out;

	ret = hclge_devlink_init(hdev);
	if (ret)
		goto err_pci_uninit;

	/* Firmware command queue initialize */
	ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
	if (ret)
		goto err_devlink_uninit;

	/* Firmware command initialize */
	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_clear_hw_resource(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_get_cap(hdev);
	if (ret)
		goto err_cmd_uninit;

	ret = hclge_query_dev_specs(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
			ret);
		goto err_cmd_uninit;
	}

	ret = hclge_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
		goto err_cmd_uninit;
	}

	ret = hclge_misc_irq_init(hdev);
	if (ret)
		goto err_msi_uninit;

	ret = hclge_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
		goto err_msi_irq_uninit;
	}

	ret = hclge_alloc_vport(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	ret = hclge_map_tqp(hdev);
	if (ret)
		goto err_msi_irq_uninit;

	if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
		if (hnae3_dev_phy_imp_supported(hdev))
			ret = hclge_update_tp_port_info(hdev);
		else
			ret = hclge_mac_mdio_config(hdev);

		if (ret)
			goto err_msi_irq_uninit;
	}

	ret = hclge_init_umv_space(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_tm_schd_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
				      &hdev->rss_cfg);
	if (ret) {
		dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"fd table init fail, ret=%d\n", ret);
		goto err_mdiobus_unreg;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	ret = hclge_update_port_info(hdev);
	if (ret)
		goto err_mdiobus_unreg;

	INIT_KFIFO(hdev->mac_tnl_log);

	hclge_dcb_ops_set(hdev);

	timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
	INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);

	hclge_clear_all_event_cause(hdev);
	hclge_clear_resetting_state(hdev);

	/* Log and clear the hw errors that have already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* request delayed reset for the error recovery because an immediate
	 * global reset on this PF would affect pending initialization of
	 * other PFs
	 */
	if (ae_dev->hw_err_reset_req) {
		enum hnae3_reset_type reset_level;

		reset_level = hclge_get_reset_level(ae_dev,
						    &ae_dev->hw_err_reset_req);
		hclge_set_def_reset_request(ae_dev, reset_level);
		mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
	}

	hclge_init_rxd_adv_layout(hdev);

	/* Enable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, true);

	ret = hclge_init_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to wake on lan init, ret = %d\n", ret);

	hclge_state_init(hdev);
	hdev->last_reset_time = jiffies;

	dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	hclge_task_schedule(hdev, round_jiffies_relative(HZ));
	return 0;

err_mdiobus_unreg:
	if (hdev->hw.mac.phydev)
		mdiobus_unregister(hdev->hw.mac.mdio_bus);
err_msi_irq_uninit:
	hclge_misc_irq_uninit(hdev);
err_msi_uninit:
	pci_free_irq_vectors(pdev);
err_cmd_uninit:
	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
	hclge_devlink_uninit(hdev);
err_pci_uninit:
	pcim_iounmap(pdev, hdev->hw.hw.io_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
out:
	mutex_destroy(&hdev->vport_lock);
	return ret;
}

static void hclge_stats_clear(struct hclge_dev *hdev)
{
	memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
	memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}

static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_config_switch_param(hdev, vf, enable,
					 HCLGE_SWITCH_ANTI_SPOOF_MASK);
}

static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
{
	return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
					  HCLGE_FILTER_FE_NIC_INGRESS_B,
					  enable, vf);
}

static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
{
	int ret;

	ret = hclge_set_mac_spoofchk(hdev, vf, enable);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set vf %d mac spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);
		return ret;
	}

	ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Set vf %d vlan spoof check %s failed, ret=%d\n",
			vf, enable ? "on" : "off", ret);

	return ret;
}

static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
				 bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_spoofchk = enable ? 1 : 0;
	int ret;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.spoofchk == new_spoofchk)
		return 0;

	if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
		dev_warn(&hdev->pdev->dev,
			 "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
			 vf);
	else if (enable && hclge_is_umv_space_full(vport, true))
		dev_warn(&hdev->pdev->dev,
			 "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
			 vf);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
	if (ret)
		return ret;

	vport->vf_info.spoofchk = new_spoofchk;
	return 0;
}

static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	int i;

	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
		return 0;

	/* resume the vf spoof check state after reset */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
					       vport->vf_info.spoofchk);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 new_trusted = enable ? 1 : 0;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (vport->vf_info.trusted == new_trusted)
		return 0;

	vport->vf_info.trusted = new_trusted;
	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);

	return 0;
}

static void hclge_reset_vf_rate(struct hclge_dev *hdev)
{
	int ret;
	int vf;

	/* reset vf rate to default value */
	for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
		struct hclge_vport *vport = &hdev->vport[vf];

		vport->vf_info.max_tx_rate = 0;
		ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"vf%d failed to reset to default, ret=%d\n",
				vf - HCLGE_VF_VPORT_START_NUM, ret);
	}
}

static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
				     int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate != 0 ||
	    max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
		dev_err(&hdev->pdev->dev,
			"min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
			min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
		return -EINVAL;
	}

	return 0;
}

static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
			     int min_tx_rate, int max_tx_rate, bool force)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
	if (ret)
		return ret;

	vport = hclge_get_vf_vport(hdev, vf);
	if (!vport)
		return -EINVAL;

	if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
		return 0;

	ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
	if (ret)
		return ret;

	vport->vf_info.max_tx_rate = max_tx_rate;

	return 0;
}

static int hclge_resume_vf_rate(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport->nic;
	struct hclge_vport *vport;
	int ret;
	int vf;

	/* resume the vf max_tx_rate after reset */
	for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
		vport = hclge_get_vf_vport(hdev, vf);
		if (!vport)
			return -EINVAL;

		/* zero means max rate; after reset, firmware has already set
		 * it to max rate, so just continue.
		 */
		if (!vport->vf_info.max_tx_rate)
			continue;

		ret = hclge_set_vf_rate(handle, vf, 0,
					vport->vf_info.max_tx_rate, true);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%d failed to resume tx_rate:%u, ret=%d\n",
				vf, vport->vf_info.max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static void hclge_reset_vport_state(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
		vport++;
	}
}

static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct pci_dev *pdev = ae_dev->pdev;
	int ret;

	set_bit(HCLGE_STATE_DOWN, &hdev->state);

	hclge_stats_clear(hdev);
	/* NOTE: a PF reset does not need to clear or restore the PF and VF
	 * table entries, so do not clean the tables in memory here.
	 */
	if (hdev->reset_type == HNAE3_IMP_RESET ||
	    hdev->reset_type == HNAE3_GLOBAL_RESET) {
		memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
		memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
		bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
		hclge_reset_umv_space(hdev);
	}

	ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
				  true, hdev->reset_pending);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed\n");
		return ret;
	}

	ret = hclge_map_tqp(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
		return ret;
	}

	ret = hclge_mac_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_tp_port_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
			ret);
		return ret;
	}

	ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
	if (ret) {
		dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_config_gro(hdev);
	if (ret)
		return ret;

	ret = hclge_init_vlan_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_tm_init_hw(hdev, true);
	if (ret) {
		dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_rss_init_hw(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
		return ret;
	}

	ret = init_mgr_tbl(hdev);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to reinit manager table, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_init_fd_config(hdev);
	if (ret) {
		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
		return ret;
	}

	ret = hclge_ptp_init(hdev);
	if (ret)
		return ret;

	/* Log and clear the hw errors that have already occurred */
	if (hnae3_dev_ras_imp_supported(hdev))
		hclge_handle_occurred_error(hdev);
	else
		hclge_handle_all_hns_hw_errors(ae_dev);

	/* Re-enable the hw error interrupts because
	 * the interrupts get disabled on global reset.
	 */
	ret = hclge_config_nic_hw_error(hdev, true);
	if (ret) {
		dev_err(&pdev->dev,
			"fail(%d) to re-enable NIC hw error interrupts\n",
			ret);
		return ret;
	}

	if (hdev->roce_client) {
		ret = hclge_config_rocee_ras_interrupt(hdev, true);
		if (ret) {
			dev_err(&pdev->dev,
				"fail(%d) to re-enable roce ras interrupts\n",
				ret);
			return ret;
		}
	}

	hclge_reset_vport_state(hdev);
	ret = hclge_reset_vport_spoofchk(hdev);
	if (ret)
		return ret;

	ret = hclge_resume_vf_rate(hdev);
	if (ret)
		return ret;

	hclge_init_rxd_adv_layout(hdev);

	ret = hclge_update_wol(hdev);
	if (ret)
		dev_warn(&pdev->dev,
			 "failed to update wol config, ret = %d\n", ret);

	dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
		 HCLGE_DRIVER_NAME);

	return 0;
}

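/* Tear down everything hclge_init_ae_dev() set up, in reverse order:
 * stop the service task and PTP, disable interrupts, then release the
 * command queue, devlink and PCI resources.
 */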
static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_mac *mac = &hdev->hw.mac;

	hclge_reset_vf_rate(hdev);
	hclge_clear_vf_vlan(hdev);
	hclge_state_uninit(hdev);
	hclge_ptp_uninit(hdev);
	hclge_uninit_rxd_adv_layout(hdev);
	hclge_uninit_mac_table(hdev);
	hclge_del_all_fd_entries(hdev);

	if (mac->phydev)
		mdiobus_unregister(mac->mdio_bus);

	/* Disable MISC vector(vector0) */
	hclge_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);

	/* Disable all hw interrupts */
	hclge_config_mac_tnl_int(hdev, false);
	hclge_config_nic_hw_error(hdev, false);
	hclge_config_rocee_ras_interrupt(hdev, false);

	hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
	hclge_misc_irq_uninit(hdev);
	hclge_devlink_uninit(hdev);
	hclge_pci_uninit(hdev);
	hclge_uninit_vport_vlan_table(hdev);
	mutex_destroy(&hdev->vport_lock);
	ae_dev->priv = NULL;
}

static u32 hclge_get_max_channels(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
}

static void hclge_get_channels(struct hnae3_handle *handle,
			       struct ethtool_channels *ch)
{
	ch->max_combined = hclge_get_max_channels(handle);
	ch->other_count = 1;
	ch->max_other = 1;
	ch->combined_count = handle->kinfo.rss_size;
}

static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
					u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	*alloc_tqps = vport->alloc_tqps;
	*max_rss_size = hdev->pf_rss_size_max;
}

static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
	struct hclge_dev *hdev = vport->back;
	u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
	u16 tc_valid[HCLGE_MAX_TC_NUM];
	u16 roundup_size;
	unsigned int i;

	roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
	roundup_size = ilog2(roundup_size);
	/* Set the RSS TC mode according to the new RSS size */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		tc_valid[i] = 0;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		tc_valid[i] = 1;
		tc_size[i] = roundup_size;
		tc_offset[i] = vport->nic.kinfo.rss_size * i;
	}

	return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
					  tc_size);
}

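/* Change the queue count of a handle: update the TM vport mapping and
 * RSS TC mode for the new size, then rebuild the RSS indirection table
 * unless the user has configured one explicitly.
 */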
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
			      bool rxfh_configured)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 cur_rss_size = kinfo->rss_size;
	u16 cur_tqps = kinfo->num_tqps;
	u32 *rss_indir;
	unsigned int i;
	int ret;

	kinfo->req_rss_size = new_tqps_num;

	ret = hclge_tm_vport_map_update(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
		return ret;
	}

	ret = hclge_set_rss_tc_mode_cfg(handle);
	if (ret)
		return ret;

	/* RSS indirection table has been configured by user */
	if (rxfh_configured)
		goto out;

	/* Reinitialize the rss indirection table according to the new RSS size */
	rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
			    GFP_KERNEL);
	if (!rss_indir)
		return -ENOMEM;

	for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
		rss_indir[i] = i % kinfo->rss_size;

	ret = hclge_set_rss(handle, rss_indir, NULL, 0);
	if (ret)
		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
			ret);

	kfree(rss_indir);

out:
	if (!ret)
		dev_info(&hdev->pdev->dev,
			 "Channels changed, rss_size from %u to %u, tqps from %u to %u",
			 cur_rss_size, kinfo->rss_size,
			 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);

	return ret;
}

static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
{
	struct hclge_set_led_state_cmd *req;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);

	req = (struct hclge_set_led_state_cmd *)desc.data;
	hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
			HCLGE_LED_LOCATE_STATE_S, locate_led_status);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Send set led state cmd error, ret =%d\n", ret);

	return ret;
}

enum hclge_led_status {
	HCLGE_LED_OFF,
	HCLGE_LED_ON,
	HCLGE_LED_NO_CHANGE = 0xFF,
};

static int hclge_set_led_id(struct hnae3_handle *handle,
			    enum ethtool_phys_id_state status)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	switch (status) {
	case ETHTOOL_ID_ACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_ON);
	case ETHTOOL_ID_INACTIVE:
		return hclge_set_led_status(hdev, HCLGE_LED_OFF);
	default:
		return -EINVAL;
	}
}

static void hclge_get_link_mode(struct hnae3_handle *handle,
				unsigned long *supported,
				unsigned long *advertising)
{
	unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	unsigned int idx = 0;

	for (; idx < size; idx++) {
		supported[idx] = hdev->hw.mac.supported[idx];
		advertising[idx] = hdev->hw.mac.advertising[idx];
	}
}

static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	bool gro_en_old = hdev->gro_en;
	int ret;

	hdev->gro_en = enable;
	ret = hclge_config_gro(hdev);
	if (ret)
		hdev->gro_en = gro_en_old;

	return ret;
}

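/* Apply the promisc mode a vport last requested. For the PF, the netdev
 * flags are merged with the overflow promisc flags; for a VF, the
 * requested unicast/multicast promisc is honoured only when the VF is
 * trusted.
 */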
static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	bool uc_en = false;
	bool mc_en = false;
	u8 tmp_flags;
	bool bc_en;
	int ret;

	if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		vport->last_promisc_flags = vport->overflow_promisc_flags;
	}

	if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state))
		return 0;

	/* for PF */
	if (!vport->vport_id) {
		tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
		ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
					     tmp_flags & HNAE3_MPE);
		if (!ret)
			set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
				&vport->state);
		else
			set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
				&vport->state);
		return ret;
	}

	/* for VF */
	if (vport->vf_info.trusted) {
		uc_en = vport->vf_info.request_uc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
		mc_en = vport->vf_info.request_mc_en > 0 ||
			vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
	}
	bc_en = vport->vf_info.request_bc_en > 0;

	ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
					 mc_en, bc_en);
	if (ret) {
		set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
		return ret;
	}
	hclge_set_vport_vlan_fltr_change(vport);

	return 0;
}

static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
	struct hclge_vport *vport;
	int ret;
	u16 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		vport = &hdev->vport[i];

		ret = hclge_sync_vport_promisc_mode(vport);
		if (ret)
			return;
	}
}

static bool hclge_module_existed(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u32 existed;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP exist state, ret = %d\n", ret);
		return false;
	}

	existed = le32_to_cpu(desc.data[0]);

	return existed != 0;
}

/* need 6 BDs (140 bytes in total) in one reading;
 * return the number of bytes actually read, 0 means read failed.
 */
static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
				     u32 len, u8 *data)
{
	struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
	struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
	u16 read_len;
	u16 copy_len;
	int ret;
	int i;

	/* setup all 6 bds to read module eeprom info. */
	for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
					   true);

		/* bd0~bd4 need next flag */
		if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
			desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	}

	/* setup bd0, this bd contains offset and read length. */
	sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
	sfp_info_bd0->offset = cpu_to_le16((u16)offset);
	read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
	sfp_info_bd0->read_len = cpu_to_le16(read_len);

	ret = hclge_cmd_send(&hdev->hw, desc, i);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get SFP eeprom info, ret = %d\n", ret);
		return 0;
	}

	/* copy sfp info from bd0 to out buffer. */
	copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
	memcpy(data, sfp_info_bd0->data, copy_len);
	read_len = copy_len;

	/* copy sfp info from bd1~bd5 to out buffer if needed. */
	for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
		if (read_len >= len)
			break;

		copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
		memcpy(data + read_len, desc[i].data, copy_len);
		read_len += copy_len;
	}

	return read_len;
}

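/* Read the module eeprom in chunks of at most HCLGE_SFP_INFO_MAX_LEN
 * bytes, advancing the offset until the requested length is copied or
 * a read returns 0 (treated as -EIO).
 */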
static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
				   u32 len, u8 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u16 read_len = 0;
	u16 data_len;

	if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
		return -EOPNOTSUPP;

	if (!hclge_module_existed(hdev))
		return -ENXIO;

	while (read_len < len) {
		data_len = hclge_get_sfp_eeprom_info(hdev,
						     offset + read_len,
						     len - read_len,
						     data + read_len);
		if (!data_len)
			return -EIO;

		read_len += data_len;
	}

	return 0;
}

static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
					 u32 *status_code)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
		return -EOPNOTSUPP;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query link diagnosis info, ret = %d\n", ret);
		return ret;
	}

	*status_code = le32_to_cpu(desc.data[0]);
	return 0;
}

/* After SR-IOV is disabled, the VF still holds some configuration and
 * info set up by the PF that needs to be cleaned.
 */
static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vlan_info vlan_info;
	int ret;

	clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
	clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
	vport->need_notify = 0;
	vport->mps = 0;

	/* after disable sriov, clean VF rate configured by PF */
	ret = hclge_tm_qs_shaper_cfg(vport, 0);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d rate config, ret = %d\n",
			vfid, ret);

	vlan_info.vlan_tag = 0;
	vlan_info.qos = 0;
	vlan_info.vlan_proto = ETH_P_8021Q;
	ret = hclge_update_port_base_vlan_cfg(vport,
					      HNAE3_PORT_BASE_VLAN_DISABLE,
					      &vlan_info);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d port base vlan, ret = %d\n",
			vfid, ret);

	ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"failed to clean vf%d spoof config, ret = %d\n",
			vfid, ret);

	memset(&vport->vf_info, 0, sizeof(vport->vf_info));
}

static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
{
	struct hclge_dev *hdev = ae_dev->priv;
	struct hclge_vport *vport;
	int i;

	for (i = 0; i < num_vfs; i++) {
		vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];

		hclge_clear_vport_vf_info(vport, i);
	}
}

static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
			       u8 *priority)
{
	struct hclge_vport *vport = hclge_get_vport(h);

	if (dscp >= HNAE3_MAX_DSCP)
		return -EINVAL;

	if (tc_mode)
		*tc_mode = vport->nic.kinfo.tc_map_mode;
	if (priority)
		*priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
			    vport->nic.kinfo.dscp_prio[dscp];

	return 0;
}

static const struct hnae3_ae_ops hclge_ops = {
	.init_ae_dev = hclge_init_ae_dev,
	.uninit_ae_dev = hclge_uninit_ae_dev,
	.reset_prepare = hclge_reset_prepare_general,
	.reset_done = hclge_reset_done,
	.init_client_instance = hclge_init_client_instance,
	.uninit_client_instance = hclge_uninit_client_instance,
	.map_ring_to_vector = hclge_map_ring_to_vector,
	.unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
	.get_vector = hclge_get_vector,
	.put_vector = hclge_put_vector,
	.set_promisc_mode = hclge_set_promisc_mode,
	.request_update_promisc_mode = hclge_request_update_promisc_mode,
	.set_loopback = hclge_set_loopback,
	.start = hclge_ae_start,
	.stop = hclge_ae_stop,
	.client_start = hclge_client_start,
	.client_stop = hclge_client_stop,
	.get_status = hclge_get_status,
	.get_ksettings_an_result = hclge_get_ksettings_an_result,
	.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
	.get_media_type = hclge_get_media_type,
	.check_port_speed = hclge_check_port_speed,
	.get_fec_stats = hclge_get_fec_stats,
	.get_fec = hclge_get_fec,
	.set_fec = hclge_set_fec,
	.get_rss_key_size = hclge_comm_get_rss_key_size,
	.get_rss = hclge_get_rss,
	.set_rss = hclge_set_rss,
	.set_rss_tuple = hclge_set_rss_tuple,
	.get_rss_tuple = hclge_get_rss_tuple,
	.get_tc_size = hclge_get_tc_size,
	.get_mac_addr = hclge_get_mac_addr,
	.set_mac_addr = hclge_set_mac_addr,
	.do_ioctl = hclge_do_ioctl,
	.add_uc_addr = hclge_add_uc_addr,
	.rm_uc_addr = hclge_rm_uc_addr,
	.add_mc_addr = hclge_add_mc_addr,
	.rm_mc_addr = hclge_rm_mc_addr,
	.set_autoneg = hclge_set_autoneg,
	.get_autoneg = hclge_get_autoneg,
	.restart_autoneg = hclge_restart_autoneg,
	.halt_autoneg = hclge_halt_autoneg,
	.get_pauseparam = hclge_get_pauseparam,
	.set_pauseparam = hclge_set_pauseparam,
	.set_mtu = hclge_set_mtu,
	.reset_queue = hclge_reset_tqp,
	.get_stats = hclge_get_stats,
	.get_mac_stats = hclge_get_mac_stat,
	.update_stats = hclge_update_stats,
	.get_strings = hclge_get_strings,
	.get_sset_count = hclge_get_sset_count,
	.get_fw_version = hclge_get_fw_version,
	.get_mdix_mode = hclge_get_mdix_mode,
	.enable_vlan_filter = hclge_enable_vlan_filter,
	.set_vlan_filter = hclge_set_vlan_filter,
	.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
	.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
	.reset_event = hclge_reset_event,
	.get_reset_level = hclge_get_reset_level,
	.set_default_reset_request = hclge_set_def_reset_request,
	.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
	.set_channels = hclge_set_channels,
	.get_channels = hclge_get_channels,
	.get_regs_len = hclge_get_regs_len,
	.get_regs = hclge_get_regs,
	.set_led_id = hclge_set_led_id,
	.get_link_mode = hclge_get_link_mode,
	.add_fd_entry = hclge_add_fd_entry,
	.del_fd_entry = hclge_del_fd_entry,
	.get_fd_rule_cnt = hclge_get_fd_rule_cnt,
	.get_fd_rule_info = hclge_get_fd_rule_info,
	.get_fd_all_rules = hclge_get_all_rules,
	.enable_fd = hclge_enable_fd,
	.add_arfs_entry = hclge_add_fd_entry_by_arfs,
	.dbg_read_cmd = hclge_dbg_read_cmd,
	.handle_hw_ras_error = hclge_handle_hw_ras_error,
	.get_hw_reset_stat = hclge_get_hw_reset_stat,
	.ae_dev_resetting = hclge_ae_dev_resetting,
	.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
	.set_gro_en = hclge_gro_en,
	.get_global_queue_id = hclge_covert_handle_qid_global,
	.set_timer_task = hclge_set_timer_task,
	.mac_connect_phy = hclge_mac_connect_phy,
	.mac_disconnect_phy = hclge_mac_disconnect_phy,
	.get_vf_config = hclge_get_vf_config,
	.set_vf_link_state = hclge_set_vf_link_state,
	.set_vf_spoofchk = hclge_set_vf_spoofchk,
	.set_vf_trust = hclge_set_vf_trust,
	.set_vf_rate = hclge_set_vf_rate,
	.set_vf_mac = hclge_set_vf_mac,
	.get_module_eeprom = hclge_get_module_eeprom,
	.get_cmdq_stat = hclge_get_cmdq_stat,
	.add_cls_flower = hclge_add_cls_flower,
	.del_cls_flower = hclge_del_cls_flower,
	.cls_flower_active = hclge_is_cls_flower_active,
	.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
	.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
	.set_tx_hwts_info = hclge_ptp_set_tx_info,
	.get_rx_hwts = hclge_ptp_get_rx_hwts,
	.get_ts_info = hclge_ptp_get_ts_info,
	.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
	.clean_vf_config = hclge_clean_vport_config,
	.get_dscp_prio = hclge_get_dscp_prio,
	.get_wol = hclge_get_wol,
	.set_wol = hclge_set_wol,
};

static struct hnae3_ae_algo ae_algo = {
	.ops = &hclge_ops,
	.pdev_id_table = ae_algo_pci_tbl,
};

static int __init hclge_init(void)
{
	pr_info("%s is initializing\n", HCLGE_NAME);

	hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
	if (!hclge_wq) {
		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
		return -ENOMEM;
	}

	hnae3_register_ae_algo(&ae_algo);

	return 0;
}

static void __exit hclge_exit(void)
{
	hnae3_unregister_ae_algo_prepare(&ae_algo);
	hnae3_unregister_ae_algo(&ae_algo);
	destroy_workqueue(hclge_wq);
}

module_init(hclge_init);
module_exit(hclge_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGE Driver");
MODULE_VERSION(HCLGE_MOD_VERSION);