1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
7 #include <linux/types.h>
9 #include <linux/if_vlan.h>
11 #include "hclge_cmd.h"
14 #define HCLGE_MOD_VERSION "1.0"
15 #define HCLGE_DRIVER_NAME "hclge"
17 #define HCLGE_MAX_PF_NUM 8
19 #define HCLGE_RD_FIRST_STATS_NUM 2
20 #define HCLGE_RD_OTHER_STATS_NUM 4
22 #define HCLGE_INVALID_VPORT 0xffff
24 #define HCLGE_PF_CFG_BLOCK_SIZE 32
/* Number of command descriptors needed to read one 32-byte PF config
 * block (HCLGE_CFG_RD_LEN_BYTES comes from hclge_cmd.h).
 */
#define HCLGE_PF_CFG_DESC_NUM \
(HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
28 #define HCLGE_VECTOR_REG_BASE 0x20000
29 #define HCLGE_MISC_VECTOR_REG_BASE 0x20400
31 #define HCLGE_VECTOR_REG_OFFSET 0x4
32 #define HCLGE_VECTOR_VF_OFFSET 0x100000
34 #define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
35 #define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
36 #define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
37 #define HCLGE_CMDQ_TX_TAIL_REG 0x27010
38 #define HCLGE_CMDQ_TX_HEAD_REG 0x27014
39 #define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
40 #define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
41 #define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
42 #define HCLGE_CMDQ_RX_TAIL_REG 0x27024
43 #define HCLGE_CMDQ_RX_HEAD_REG 0x27028
44 #define HCLGE_CMDQ_INTR_SRC_REG 0x27100
45 #define HCLGE_CMDQ_INTR_STS_REG 0x27104
46 #define HCLGE_CMDQ_INTR_EN_REG 0x27108
47 #define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
49 /* bar registers for common func */
50 #define HCLGE_VECTOR0_OTER_EN_REG 0x20600
51 #define HCLGE_RAS_OTHER_STS_REG 0x20B00
52 #define HCLGE_FUNC_RESET_STS_REG 0x20C00
53 #define HCLGE_GRO_EN_REG 0x28000
55 /* bar registers for rcb */
56 #define HCLGE_RING_RX_ADDR_L_REG 0x80000
57 #define HCLGE_RING_RX_ADDR_H_REG 0x80004
58 #define HCLGE_RING_RX_BD_NUM_REG 0x80008
59 #define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
60 #define HCLGE_RING_RX_MERGE_EN_REG 0x80014
61 #define HCLGE_RING_RX_TAIL_REG 0x80018
62 #define HCLGE_RING_RX_HEAD_REG 0x8001C
63 #define HCLGE_RING_RX_FBD_NUM_REG 0x80020
64 #define HCLGE_RING_RX_OFFSET_REG 0x80024
65 #define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
66 #define HCLGE_RING_RX_STASH_REG 0x80030
67 #define HCLGE_RING_RX_BD_ERR_REG 0x80034
68 #define HCLGE_RING_TX_ADDR_L_REG 0x80040
69 #define HCLGE_RING_TX_ADDR_H_REG 0x80044
70 #define HCLGE_RING_TX_BD_NUM_REG 0x80048
71 #define HCLGE_RING_TX_PRIORITY_REG 0x8004C
72 #define HCLGE_RING_TX_TC_REG 0x80050
73 #define HCLGE_RING_TX_MERGE_EN_REG 0x80054
74 #define HCLGE_RING_TX_TAIL_REG 0x80058
75 #define HCLGE_RING_TX_HEAD_REG 0x8005C
76 #define HCLGE_RING_TX_FBD_NUM_REG 0x80060
77 #define HCLGE_RING_TX_OFFSET_REG 0x80064
78 #define HCLGE_RING_TX_EBD_NUM_REG 0x80068
79 #define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
80 #define HCLGE_RING_TX_BD_ERR_REG 0x80074
81 #define HCLGE_RING_EN_REG 0x80090
83 /* bar registers for tqp interrupt */
84 #define HCLGE_TQP_INTR_CTRL_REG 0x20000
85 #define HCLGE_TQP_INTR_GL0_REG 0x20100
86 #define HCLGE_TQP_INTR_GL1_REG 0x20200
87 #define HCLGE_TQP_INTR_GL2_REG 0x20300
88 #define HCLGE_TQP_INTR_RL_REG 0x20900
90 #define HCLGE_RSS_IND_TBL_SIZE 512
91 #define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
92 #define HCLGE_RSS_KEY_SIZE 40
93 #define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
94 #define HCLGE_RSS_HASH_ALGO_SIMPLE 1
95 #define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
96 #define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
/* Number of commands needed to program the whole 512-entry RSS
 * indirection table (HCLGE_RSS_CFG_TBL_SIZE comes from hclge_cmd.h).
 */
#define HCLGE_RSS_CFG_TBL_NUM \
(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
100 #define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
101 #define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
102 #define HCLGE_D_PORT_BIT BIT(0)
103 #define HCLGE_S_PORT_BIT BIT(1)
104 #define HCLGE_D_IP_BIT BIT(2)
105 #define HCLGE_S_IP_BIT BIT(3)
106 #define HCLGE_V_TAG_BIT BIT(4)
108 #define HCLGE_RSS_TC_SIZE_0 1
109 #define HCLGE_RSS_TC_SIZE_1 2
110 #define HCLGE_RSS_TC_SIZE_2 4
111 #define HCLGE_RSS_TC_SIZE_3 8
112 #define HCLGE_RSS_TC_SIZE_4 16
113 #define HCLGE_RSS_TC_SIZE_5 32
114 #define HCLGE_RSS_TC_SIZE_6 64
115 #define HCLGE_RSS_TC_SIZE_7 128
117 #define HCLGE_UMV_TBL_SIZE 3072
/* Unicast MAC-VLAN table entries split evenly across the max number of
 * PFs: 3072 / 8 = 384 entries per PF by default.
 */
#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
(HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
121 #define HCLGE_TQP_RESET_TRY_TIMES 10
123 #define HCLGE_PHY_PAGE_MDIX 0
124 #define HCLGE_PHY_PAGE_COPPER 0
126 /* Page Selection Reg. */
127 #define HCLGE_PHY_PAGE_REG 22
129 /* Copper Specific Control Register */
130 #define HCLGE_PHY_CSC_REG 16
132 /* Copper Specific Status Register */
133 #define HCLGE_PHY_CSS_REG 17
135 #define HCLGE_PHY_MDIX_CTRL_S 5
136 #define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
138 #define HCLGE_PHY_MDIX_STATUS_B 6
139 #define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
141 /* Factor used to calculate offset and bitmap of VF num */
142 #define HCLGE_VF_NUM_PER_CMD 64
143 #define HCLGE_VF_NUM_PER_BYTE 8
145 enum HLCGE_PORT_TYPE {
150 #define HCLGE_PF_ID_S 0
151 #define HCLGE_PF_ID_M GENMASK(2, 0)
152 #define HCLGE_VF_ID_S 3
153 #define HCLGE_VF_ID_M GENMASK(10, 3)
154 #define HCLGE_PORT_TYPE_B 11
155 #define HCLGE_NETWORK_PORT_ID_S 0
156 #define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
158 /* Reset related Registers */
159 #define HCLGE_PF_OTHER_INT_REG 0x20600
160 #define HCLGE_MISC_RESET_STS_REG 0x20700
161 #define HCLGE_MISC_VECTOR_INT_STS 0x20800
162 #define HCLGE_GLOBAL_RESET_REG 0x20A00
163 #define HCLGE_GLOBAL_RESET_BIT 0
164 #define HCLGE_CORE_RESET_BIT 1
165 #define HCLGE_IMP_RESET_BIT 2
166 #define HCLGE_FUN_RST_ING 0x20C00
167 #define HCLGE_FUN_RST_ING_B 0
169 /* Vector0 register bits define */
170 #define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
171 #define HCLGE_VECTOR0_CORERESET_INT_B 6
172 #define HCLGE_VECTOR0_IMPRESET_INT_B 7
174 /* Vector0 interrupt CMDQ event source register(RW) */
175 #define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
176 /* CMDQ register bits for RX event(=MBX event) */
177 #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
179 #define HCLGE_VECTOR0_IMP_RESET_INT_B 1
/* Default max frame size: standard Ethernet payload plus header, FCS
 * and room for two VLAN tags.
 */
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
183 #define HCLGE_MAC_MIN_FRAME 64
184 #define HCLGE_MAC_MAX_FRAME 9728
186 #define HCLGE_SUPPORT_1G_BIT BIT(0)
187 #define HCLGE_SUPPORT_10G_BIT BIT(1)
188 #define HCLGE_SUPPORT_25G_BIT BIT(2)
189 #define HCLGE_SUPPORT_50G_BIT BIT(3)
190 #define HCLGE_SUPPORT_100G_BIT BIT(4)
192 enum HCLGE_DEV_STATE {
193 HCLGE_STATE_REINITING,
195 HCLGE_STATE_DISABLED,
196 HCLGE_STATE_REMOVING,
197 HCLGE_STATE_SERVICE_INITED,
198 HCLGE_STATE_SERVICE_SCHED,
199 HCLGE_STATE_RST_SERVICE_SCHED,
200 HCLGE_STATE_RST_HANDLING,
201 HCLGE_STATE_MBX_SERVICE_SCHED,
202 HCLGE_STATE_MBX_HANDLING,
203 HCLGE_STATE_STATISTICS_UPDATING,
204 HCLGE_STATE_CMD_DISABLE,
208 enum hclge_evt_cause {
209 HCLGE_VECTOR0_EVENT_RST,
210 HCLGE_VECTOR0_EVENT_MBX,
211 HCLGE_VECTOR0_EVENT_ERR,
212 HCLGE_VECTOR0_EVENT_OTHER,
215 #define HCLGE_MPF_ENBALE 1
217 enum HCLGE_MAC_SPEED {
218 HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
219 HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
220 HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
221 HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
222 HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */
223 HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
224 HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
225 HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
226 HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */
229 enum HCLGE_MAC_DUPLEX {
238 u8 mac_addr[ETH_ALEN];
int link; /* store the link status of mac & phy (if phy exists) */
243 struct phy_device *phydev;
244 struct mii_bus *mdio_bus;
245 phy_interface_t phy_if;
246 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
247 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
251 void __iomem *io_base;
252 struct hclge_mac mac;
254 struct hclge_cmq cmq;
258 struct hlcge_tqp_stats {
/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
260 u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
262 u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
266 /* copy of device pointer from pci_dev,
 * used when performing DMA mapping
270 struct hnae3_queue q;
271 struct hlcge_tqp_stats tqp_stats;
272 u16 index; /* Global index in a NIC controller */
286 #define HCLGE_PG_NUM 4
287 #define HCLGE_SCH_MODE_SP 0
288 #define HCLGE_SCH_MODE_DWRR 1
289 struct hclge_pg_info {
291 u8 pg_sch_mode; /* 0: sp; 1: dwrr */
294 u8 tc_dwrr[HNAE3_MAX_TC];
297 struct hclge_tc_info {
299 u8 tc_sch_mode; /* 0: sp; 1: dwrr */
312 u8 mac_addr[ETH_ALEN];
319 struct hclge_tm_info {
321 u8 num_pg; /* It must be 1 if vNET-Base schd */
322 u8 pg_dwrr[HCLGE_PG_NUM];
323 u8 prio_tc[HNAE3_MAX_USER_PRIO];
324 struct hclge_pg_info pg_info[HCLGE_PG_NUM];
325 struct hclge_tc_info tc_info[HNAE3_MAX_TC];
326 enum hclge_fc_mode fc_mode;
327 u8 hw_pfc_map; /* Allow for packet drop or not on this TC */
328 u8 pfc_en; /* PFC enabled or not for user priority */
331 struct hclge_comm_stats_str {
332 char desc[ETH_GSTRING_LEN];
333 unsigned long offset;
/* mac stats, opcode id: 0x0032 */
337 struct hclge_mac_stats {
338 u64 mac_tx_mac_pause_num;
339 u64 mac_rx_mac_pause_num;
340 u64 mac_tx_pfc_pri0_pkt_num;
341 u64 mac_tx_pfc_pri1_pkt_num;
342 u64 mac_tx_pfc_pri2_pkt_num;
343 u64 mac_tx_pfc_pri3_pkt_num;
344 u64 mac_tx_pfc_pri4_pkt_num;
345 u64 mac_tx_pfc_pri5_pkt_num;
346 u64 mac_tx_pfc_pri6_pkt_num;
347 u64 mac_tx_pfc_pri7_pkt_num;
348 u64 mac_rx_pfc_pri0_pkt_num;
349 u64 mac_rx_pfc_pri1_pkt_num;
350 u64 mac_rx_pfc_pri2_pkt_num;
351 u64 mac_rx_pfc_pri3_pkt_num;
352 u64 mac_rx_pfc_pri4_pkt_num;
353 u64 mac_rx_pfc_pri5_pkt_num;
354 u64 mac_rx_pfc_pri6_pkt_num;
355 u64 mac_rx_pfc_pri7_pkt_num;
356 u64 mac_tx_total_pkt_num;
357 u64 mac_tx_total_oct_num;
358 u64 mac_tx_good_pkt_num;
359 u64 mac_tx_bad_pkt_num;
360 u64 mac_tx_good_oct_num;
361 u64 mac_tx_bad_oct_num;
362 u64 mac_tx_uni_pkt_num;
363 u64 mac_tx_multi_pkt_num;
364 u64 mac_tx_broad_pkt_num;
365 u64 mac_tx_undersize_pkt_num;
366 u64 mac_tx_oversize_pkt_num;
367 u64 mac_tx_64_oct_pkt_num;
368 u64 mac_tx_65_127_oct_pkt_num;
369 u64 mac_tx_128_255_oct_pkt_num;
370 u64 mac_tx_256_511_oct_pkt_num;
371 u64 mac_tx_512_1023_oct_pkt_num;
372 u64 mac_tx_1024_1518_oct_pkt_num;
373 u64 mac_tx_1519_2047_oct_pkt_num;
374 u64 mac_tx_2048_4095_oct_pkt_num;
375 u64 mac_tx_4096_8191_oct_pkt_num;
377 u64 mac_tx_8192_9216_oct_pkt_num;
378 u64 mac_tx_9217_12287_oct_pkt_num;
379 u64 mac_tx_12288_16383_oct_pkt_num;
380 u64 mac_tx_1519_max_good_oct_pkt_num;
381 u64 mac_tx_1519_max_bad_oct_pkt_num;
383 u64 mac_rx_total_pkt_num;
384 u64 mac_rx_total_oct_num;
385 u64 mac_rx_good_pkt_num;
386 u64 mac_rx_bad_pkt_num;
387 u64 mac_rx_good_oct_num;
388 u64 mac_rx_bad_oct_num;
389 u64 mac_rx_uni_pkt_num;
390 u64 mac_rx_multi_pkt_num;
391 u64 mac_rx_broad_pkt_num;
392 u64 mac_rx_undersize_pkt_num;
393 u64 mac_rx_oversize_pkt_num;
394 u64 mac_rx_64_oct_pkt_num;
395 u64 mac_rx_65_127_oct_pkt_num;
396 u64 mac_rx_128_255_oct_pkt_num;
397 u64 mac_rx_256_511_oct_pkt_num;
398 u64 mac_rx_512_1023_oct_pkt_num;
399 u64 mac_rx_1024_1518_oct_pkt_num;
400 u64 mac_rx_1519_2047_oct_pkt_num;
401 u64 mac_rx_2048_4095_oct_pkt_num;
402 u64 mac_rx_4096_8191_oct_pkt_num;
404 u64 mac_rx_8192_9216_oct_pkt_num;
405 u64 mac_rx_9217_12287_oct_pkt_num;
406 u64 mac_rx_12288_16383_oct_pkt_num;
407 u64 mac_rx_1519_max_good_oct_pkt_num;
408 u64 mac_rx_1519_max_bad_oct_pkt_num;
410 u64 mac_tx_fragment_pkt_num;
411 u64 mac_tx_undermin_pkt_num;
412 u64 mac_tx_jabber_pkt_num;
413 u64 mac_tx_err_all_pkt_num;
414 u64 mac_tx_from_app_good_pkt_num;
415 u64 mac_tx_from_app_bad_pkt_num;
416 u64 mac_rx_fragment_pkt_num;
417 u64 mac_rx_undermin_pkt_num;
418 u64 mac_rx_jabber_pkt_num;
419 u64 mac_rx_fcs_err_pkt_num;
420 u64 mac_rx_send_app_good_pkt_num;
421 u64 mac_rx_send_app_bad_pkt_num;
422 u64 mac_tx_pfc_pause_pkt_num;
423 u64 mac_rx_pfc_pause_pkt_num;
424 u64 mac_tx_ctrl_pkt_num;
425 u64 mac_rx_ctrl_pkt_num;
428 #define HCLGE_STATS_TIMER_INTERVAL (60 * 5)
429 struct hclge_hw_stats {
430 struct hclge_mac_stats mac_stats;
434 struct hclge_vlan_type_cfg {
435 u16 rx_ot_fst_vlan_type;
436 u16 rx_ot_sec_vlan_type;
437 u16 rx_in_fst_vlan_type;
438 u16 rx_in_sec_vlan_type;
444 HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
445 HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
446 HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
447 HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
450 enum HCLGE_FD_KEY_TYPE {
451 HCLGE_FD_KEY_BASE_ON_PTYPE,
452 HCLGE_FD_KEY_BASE_ON_TUPLE,
455 enum HCLGE_FD_STAGE {
460 /* OUTER_XXX indicates tuples in tunnel header of tunnel packet
 * INNER_XXX indicates tuples in tunneled header of tunnel packet or
462 * tuples of non-tunnel packet
464 enum HCLGE_FD_TUPLE {
498 enum HCLGE_FD_META_DATA {
515 static const struct key_info meta_data_key_info[] = {
516 { PACKET_TYPE_ID, 6},
526 static const struct key_info tuple_key_info[] = {
527 { OUTER_DST_MAC, 48},
528 { OUTER_SRC_MAC, 48},
529 { OUTER_VLAN_TAG_FST, 16},
530 { OUTER_VLAN_TAG_SEC, 16},
531 { OUTER_ETH_TYPE, 16},
534 { OUTER_IP_PROTO, 8},
538 { OUTER_SRC_PORT, 16},
539 { OUTER_DST_PORT, 16},
541 { OUTER_TUN_VNI, 24},
542 { OUTER_TUN_FLOW_ID, 8},
543 { INNER_DST_MAC, 48},
544 { INNER_SRC_MAC, 48},
545 { INNER_VLAN_TAG_FST, 16},
546 { INNER_VLAN_TAG_SEC, 16},
547 { INNER_ETH_TYPE, 16},
550 { INNER_IP_PROTO, 8},
554 { INNER_SRC_PORT, 16},
555 { INNER_DST_PORT, 16},
/* Flow director TCAM key sizing. MAX_KEY_LENGTH is in bits:
 * 400 bits -> 50 bytes -> 13 dwords (rounded up) -> 52 bytes.
 */
#define MAX_KEY_LENGTH 400
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
564 enum HCLGE_FD_PACKET_TYPE {
569 enum HCLGE_FD_ACTION {
570 HCLGE_FD_ACTION_ACCEPT_PACKET,
571 HCLGE_FD_ACTION_DROP_PACKET,
574 struct hclge_fd_key_cfg {
576 u8 inner_sipv6_word_en;
577 u8 inner_dipv6_word_en;
578 u8 outer_sipv6_word_en;
579 u8 outer_dipv6_word_en;
581 u32 meta_data_active;
584 struct hclge_fd_cfg {
589 u32 rule_num[2]; /* rule entry number */
590 u16 cnt_num[2]; /* rule hit counter number */
591 struct hclge_fd_key_cfg key_cfg[2];
594 struct hclge_fd_rule_tuples {
607 struct hclge_fd_rule {
608 struct hlist_node rule_node;
609 struct hclge_fd_rule_tuples tuples;
610 struct hclge_fd_rule_tuples tuples_mask;
619 struct hclge_fd_ad_data {
622 u8 forward_to_direct_queue;
627 u8 write_rule_id_to_bd;
632 /* For each bit of TCAM entry, it uses a pair of 'x' and
633 * 'y' to indicate which value to match, like below:
634 * ----------------------------------
635 * | bit x | bit y | search value |
636 * ----------------------------------
637 * | 0 | 0 | always hit |
638 * ----------------------------------
639 * | 1 | 0 | match '0' |
640 * ----------------------------------
641 * | 0 | 1 | match '1' |
642 * ----------------------------------
643 * | 1 | 1 | invalid |
644 * ----------------------------------
645 * Then for input key(k) and mask(v), we can calculate the value by
/* Derive the TCAM 'x' bit pattern from key (k) and mask (v): a result
 * bit is set where the mask selects that bit and the key bit is 0
 * (the "match '0'" row of the table above).
 */
#define calc_x(x, k, v) ((x) = ((v) & ~(k)))
651 #define calc_y(y, k, v) \
653 const typeof(k) _k_ = (k); \
654 const typeof(v) _v_ = (v); \
655 (y) = (_k_ ^ ~_v_) & (_k_); \
658 #define HCLGE_VPORT_NUM 256
660 struct pci_dev *pdev;
661 struct hnae3_ae_dev *ae_dev;
663 struct hclge_misc_vector misc_vector;
664 struct hclge_hw_stats hw_stats;
666 unsigned long flr_state;
667 unsigned long last_reset_time;
669 enum hnae3_reset_type reset_type;
670 enum hnae3_reset_type reset_level;
671 unsigned long default_reset_request;
672 unsigned long reset_request; /* reset has been requested */
673 unsigned long reset_pending; /* client rst is pending to be served */
674 unsigned long reset_count; /* the number of reset has been done */
677 u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
678 u16 num_tqps; /* Num task queue pairs of this PF */
679 u16 num_req_vfs; /* Num VFs requested for this PF */
681 u16 base_tqp_pid; /* Base task tqp physical id of this PF */
682 u16 alloc_rss_size; /* Allocated RSS task queue */
683 u16 rss_size_max; /* HW defined max RSS task queue */
685 u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
686 u16 num_alloc_vport; /* Num vports this driver supports */
692 enum hclge_fc_mode fc_mode_last_time;
693 u8 support_sfp_query;
695 #define HCLGE_FLAG_TC_BASE_SCH_MODE 1
696 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
703 struct hclge_tm_info tm_info;
708 u16 roce_base_msix_offset;
712 u16 num_roce_msi; /* Num of roce vectors for this PF */
713 int roce_base_vector;
715 u16 pending_udp_bitmap;
720 u16 adminq_work_limit; /* Num of admin receive queue desc to process */
721 unsigned long service_timer_period;
722 unsigned long service_timer_previous;
723 struct timer_list service_timer;
724 struct timer_list reset_timer;
725 struct work_struct service_task;
726 struct work_struct rst_service_task;
727 struct work_struct mbx_service_task;
730 int num_alloc_vfs; /* Actual number of VFs allocated */
732 struct hclge_tqp *htqp;
733 struct hclge_vport *vport;
735 struct dentry *hclge_dbgfs;
737 struct hnae3_client *nic_client;
738 struct hnae3_client *roce_client;
740 #define HCLGE_FLAG_MAIN BIT(0)
741 #define HCLGE_FLAG_DCB_CAPABLE BIT(1)
742 #define HCLGE_FLAG_DCB_ENABLE BIT(2)
743 #define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
746 u32 pkt_buf_size; /* Total pf buf size for tx/rx */
747 u32 tx_buf_size; /* Tx buffer size for each TC */
748 u32 dv_buf_size; /* Dv buffer size for each TC */
750 u32 mps; /* Max packet size */
751 /* vport_lock protect resource shared by vports */
752 struct mutex vport_lock;
754 struct hclge_vlan_type_cfg vlan_type_cfg;
756 unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
758 struct hclge_fd_cfg fd_cfg;
759 struct hlist_head fd_rule_list;
760 u16 hclge_fd_rule_num;
763 /* max available unicast mac vlan space */
765 /* private unicast mac vlan space, it's same for PF and its VFs */
767 /* unicast mac vlan space shared by PF and its VFs */
769 struct mutex umv_mutex; /* protect share_umv_size */
772 /* VPort level vlan tag configuration for TX direction */
773 struct hclge_tx_vtag_cfg {
774 bool accept_tag1; /* Whether accept tag1 packet from host */
775 bool accept_untag1; /* Whether accept untag1 packet from host */
778 bool insert_tag1_en; /* Whether insert inner vlan tag */
779 bool insert_tag2_en; /* Whether insert outer vlan tag */
780 u16 default_tag1; /* The default inner vlan tag to insert */
781 u16 default_tag2; /* The default outer vlan tag to insert */
784 /* VPort level vlan tag configuration for RX direction */
785 struct hclge_rx_vtag_cfg {
786 bool strip_tag1_en; /* Whether strip inner vlan tag */
787 bool strip_tag2_en; /* Whether strip outer vlan tag */
788 bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
789 bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
792 struct hclge_rss_tuple_cfg {
803 enum HCLGE_VPORT_STATE {
804 HCLGE_VPORT_STATE_ALIVE,
805 HCLGE_VPORT_STATE_MAX
809 u16 alloc_tqps; /* Allocated Tx/Rx queues */
811 u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
812 /* User configured lookup table entries */
813 u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
814 int rss_algo; /* User configured hash algorithm */
815 /* User configured rss tuple sets */
816 struct hclge_rss_tuple_cfg rss_tuple_sets;
821 u16 bw_limit; /* VSI BW Limit (0 = disabled) */
824 struct hclge_tx_vtag_cfg txvlan_cfg;
825 struct hclge_rx_vtag_cfg rxvlan_cfg;
830 struct hclge_dev *back; /* Back reference to associated dev */
831 struct hnae3_handle nic;
832 struct hnae3_handle roce;
835 unsigned long last_active_jiffies;
836 u32 mps; /* Max packet size */
839 void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
840 bool en_mc, bool en_bc, int vport_id);
842 int hclge_add_uc_addr_common(struct hclge_vport *vport,
843 const unsigned char *addr);
844 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
845 const unsigned char *addr);
846 int hclge_add_mc_addr_common(struct hclge_vport *vport,
847 const unsigned char *addr);
848 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
849 const unsigned char *addr);
851 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
852 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
853 int vector_id, bool en,
854 struct hnae3_ring_chain_node *ring_chain);
856 static inline int hclge_get_queue_id(struct hnae3_queue *queue)
858 struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
863 static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
865 return !!hdev->reset_pending;
868 int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
869 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
870 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
871 u16 vlan_id, bool is_kill);
872 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
874 int hclge_buffer_alloc(struct hclge_dev *hdev);
875 int hclge_rss_init_hw(struct hclge_dev *hdev);
876 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
878 int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
879 void hclge_mbx_handler(struct hclge_dev *hdev);
880 int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
881 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
882 int hclge_cfg_flowctrl(struct hclge_dev *hdev);
883 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
884 int hclge_vport_start(struct hclge_vport *vport);
885 void hclge_vport_stop(struct hclge_vport *vport);
886 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
887 int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
888 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
889 int hclge_notify_client(struct hclge_dev *hdev,
890 enum hnae3_reset_notify_type type);