// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>
#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"

#define HCLGEVF_NAME	"hclgevf"

static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;

static const struct pci_device_id ae_algovf_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);

static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
					 HCLGEVF_CMDQ_TX_ADDR_H_REG,
					 HCLGEVF_CMDQ_TX_DEPTH_REG,
					 HCLGEVF_CMDQ_TX_TAIL_REG,
					 HCLGEVF_CMDQ_TX_HEAD_REG,
					 HCLGEVF_CMDQ_RX_ADDR_L_REG,
					 HCLGEVF_CMDQ_RX_ADDR_H_REG,
					 HCLGEVF_CMDQ_RX_DEPTH_REG,
					 HCLGEVF_CMDQ_RX_TAIL_REG,
					 HCLGEVF_CMDQ_RX_HEAD_REG,
					 HCLGEVF_VECTOR0_CMDQ_SRC_REG,
					 HCLGEVF_CMDQ_INTR_STS_REG,
					 HCLGEVF_CMDQ_INTR_EN_REG,
					 HCLGEVF_CMDQ_INTR_GEN_REG};

static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
					   HCLGEVF_RST_ING,
					   HCLGEVF_GRO_EN_REG};

static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
					 HCLGEVF_RING_RX_ADDR_H_REG,
					 HCLGEVF_RING_RX_BD_NUM_REG,
					 HCLGEVF_RING_RX_BD_LENGTH_REG,
					 HCLGEVF_RING_RX_MERGE_EN_REG,
					 HCLGEVF_RING_RX_TAIL_REG,
					 HCLGEVF_RING_RX_HEAD_REG,
					 HCLGEVF_RING_RX_FBD_NUM_REG,
					 HCLGEVF_RING_RX_OFFSET_REG,
					 HCLGEVF_RING_RX_FBD_OFFSET_REG,
					 HCLGEVF_RING_RX_STASH_REG,
					 HCLGEVF_RING_RX_BD_ERR_REG,
					 HCLGEVF_RING_TX_ADDR_L_REG,
					 HCLGEVF_RING_TX_ADDR_H_REG,
					 HCLGEVF_RING_TX_BD_NUM_REG,
					 HCLGEVF_RING_TX_PRIORITY_REG,
					 HCLGEVF_RING_TX_TC_REG,
					 HCLGEVF_RING_TX_MERGE_EN_REG,
					 HCLGEVF_RING_TX_TAIL_REG,
					 HCLGEVF_RING_TX_HEAD_REG,
					 HCLGEVF_RING_TX_FBD_NUM_REG,
					 HCLGEVF_RING_TX_OFFSET_REG,
					 HCLGEVF_RING_TX_EBD_NUM_REG,
					 HCLGEVF_RING_TX_EBD_OFFSET_REG,
					 HCLGEVF_RING_TX_BD_ERR_REG,
					 HCLGEVF_RING_EN_REG};

static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
					     HCLGEVF_TQP_INTR_GL0_REG,
					     HCLGEVF_TQP_INTR_GL1_REG,
					     HCLGEVF_TQP_INTR_GL2_REG,
					     HCLGEVF_TQP_INTR_RL_REG};

static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
	struct hnae3_handle *handle)
{
	return container_of(handle, struct hclgevf_dev, nic);
}
static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_desc desc;
	struct hclgevf_tqp *tqp;
	int status;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_QUERY_RX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);

		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
					     true);

		desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"Query tqp stat fail, status = %d, queue = %d\n",
				status, i);
			return status;
		}
		tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
			le32_to_cpu(desc.data[1]);
	}

	return 0;
}

static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	u64 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
	}
	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
	}

	return buff;
}

static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;

	return kinfo->num_tqps * 2;
}

static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	u8 *buff = data;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	for (i = 0; i < kinfo->num_tqps; i++) {
		struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
						       struct hclgevf_tqp, q);
		snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
			 tqp->index);
		buff += ETH_GSTRING_LEN;
	}

	return buff;
}

static void hclgevf_update_stats(struct hnae3_handle *handle,
				 struct net_device_stats *net_stats)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int status;

	status = hclgevf_tqps_update_stats(handle);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF update of TQPS stats fail, status = %d.\n",
			status);
}

static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
{
	if (strset == ETH_SS_TEST)
		return -EOPNOTSUPP;
	else if (strset == ETH_SS_STATS)
		return hclgevf_tqps_get_sset_count(handle, strset);

	return 0;
}

static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
				u8 *data)
{
	u8 *p = (u8 *)data;

	if (strset == ETH_SS_STATS)
		p = hclgevf_tqps_get_strings(handle, p);
}

static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
	hclgevf_tqps_get_stats(handle, data);
}

static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
{
	u8 resp_msg;
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
				      true, &resp_msg, sizeof(u8));
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get TC info from PF failed %d\n",
			status);
		return status;
	}

	hdev->hw_tc_map = resp_msg;

	return 0;
}

static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN	8
	u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
	int status;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
				      true, resp_msg,
				      HCLGEVF_TQPS_RSS_INFO_LEN);
	if (status) {
		dev_err(&hdev->pdev->dev,
			"VF request to get tqp info from PF failed %d\n",
			status);
		return status;
	}

	memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
	memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
	memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
	memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));

	return 0;
}

static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2], resp_data[2];
	u16 qid_in_pf = 0;
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
				   2, true, resp_data, 2);
	if (!ret)
		qid_in_pf = *(u16 *)resp_data;

	return qid_in_pf;
}

static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
	struct hclgevf_tqp *tqp;
	int i;

	hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
				  sizeof(struct hclgevf_tqp), GFP_KERNEL);
	if (!hdev->htqp)
		return -ENOMEM;

	tqp = hdev->htqp;

	for (i = 0; i < hdev->num_tqps; i++) {
		tqp->dev = &hdev->pdev->dev;
		tqp->index = i;

		tqp->q.ae_algo = &ae_algovf;
		tqp->q.buf_size = hdev->rx_buf_len;
		tqp->q.desc_num = hdev->num_desc;
		tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
			i * HCLGEVF_TQP_REG_SIZE;

		tqp++;
	}

	return 0;
}

static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo;
	u16 new_tqps = hdev->num_tqps;
	int i;

	kinfo = &nic->kinfo;
	kinfo->num_tc = 0;
	kinfo->num_desc = hdev->num_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
		if (hdev->hw_tc_map & BIT(i))
			kinfo->num_tc++;

	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
	new_tqps = kinfo->rss_size * kinfo->num_tc;
	kinfo->num_tqps = min(new_tqps, hdev->num_tqps);

	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	for (i = 0; i < kinfo->num_tqps; i++) {
		hdev->htqp[i].q.handle = &hdev->nic;
		hdev->htqp[i].q.tqp_index = i;
		kinfo->tqp[i] = &hdev->htqp[i].q;
	}

	return 0;
}

static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
{
	int status;
	u8 resp_msg;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
				      0, false, &resp_msg, sizeof(u8));
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed to fetch link status(%d) from PF\n", status);
}

void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
{
	struct hnae3_handle *handle = &hdev->nic;
	struct hnae3_client *client;

	client = handle->client;

	link_state =
		test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;

	if (link_state != hdev->hw.mac.link) {
		client->ops->link_status_change(handle, !!link_state);
		hdev->hw.mac.link = link_state;
	}
}

static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	int ret;

	nic->ae_algo = &ae_algovf;
	nic->pdev = hdev->pdev;
	nic->numa_node_mask = hdev->numa_node_mask;
	nic->flags |= HNAE3_SUPPORT_VF;

	if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
		dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
			hdev->ae_dev->dev_type);
		return -EINVAL;
	}

	ret = hclgevf_knic_setup(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
			ret);
	return ret;
}

static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
{
	if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
		dev_warn(&hdev->pdev->dev,
			 "vector(vector_id %d) has been freed.\n", vector_id);
		return;
	}

	hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
	hdev->num_msi_left += 1;
	hdev->num_msi_used -= 1;
}

static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
			      struct hnae3_vector_info *vector_info)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_vector_info *vector = vector_info;
	int alloc = 0;
	int i, j;

	vector_num = min(hdev->num_msi_left, vector_num);

	for (j = 0; j < vector_num; j++) {
		for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
			if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
				vector->vector = pci_irq_vector(hdev->pdev, i);
				vector->io_addr = hdev->hw.io_base +
					HCLGEVF_VECTOR_REG_BASE +
					(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
				hdev->vector_status[i] = 0;
				hdev->vector_irq[i] = vector->vector;

				vector++;
				alloc++;

				break;
			}
		}
	}
	hdev->num_msi_left -= alloc;
	hdev->num_msi_used += alloc;

	return alloc;
}

static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
{
	int i;

	for (i = 0; i < hdev->num_msi; i++)
		if (vector == hdev->vector_irq[i])
			return i;

	return -EINVAL;
}

static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
				    const u8 hfunc, const u8 *key)
{
	struct hclgevf_rss_config_cmd *req;
	struct hclgevf_desc desc;
	int key_offset;
	int key_size;
	int ret;

	req = (struct hclgevf_rss_config_cmd *)desc.data;

	for (key_offset = 0; key_offset < 3; key_offset++) {
		hclgevf_cmd_setup_basic_desc(&desc,
					     HCLGEVF_OPC_RSS_GENERIC_CONFIG,
					     false);

		req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
		req->hash_config |=
			(key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);

		if (key_offset == 2)
			key_size =
			HCLGEVF_RSS_KEY_SIZE - HCLGEVF_RSS_HASH_KEY_NUM * 2;
		else
			key_size = HCLGEVF_RSS_HASH_KEY_NUM;

		memcpy(req->hash_key,
		       key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);

		ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"Configure RSS config fail, status = %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_KEY_SIZE;
}

static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
{
	return HCLGEVF_RSS_IND_TBL_SIZE;
}

static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
	const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
	struct hclgevf_rss_indirection_table_cmd *req;
	struct hclgevf_desc desc;
	int status;
	int i, j;

	req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;

	for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
		hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
					     false);
		req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
		req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
		for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
			req->rss_result[j] =
				indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];

		status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
		if (status) {
			dev_err(&hdev->pdev->dev,
				"VF failed(=%d) to set RSS indirection table\n",
				status);
			return status;
		}
	}

	return 0;
}

static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
{
	struct hclgevf_rss_tc_mode_cmd *req;
	u16 tc_offset[HCLGEVF_MAX_TC_NUM];
	u16 tc_valid[HCLGEVF_MAX_TC_NUM];
	u16 tc_size[HCLGEVF_MAX_TC_NUM];
	struct hclgevf_desc desc;
	u16 roundup_size;
	int status;
	int i;

	req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;

	roundup_size = roundup_pow_of_two(rss_size);
	roundup_size = ilog2(roundup_size);

	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
		tc_size[i] = roundup_size;
		tc_offset[i] = rss_size * i;
	}

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
	for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
		hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
			      (tc_valid[i] & 0x1));
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
				HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
		hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
				HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
	}
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to set rss tc mode\n", status);

	return status;
}

static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int i;

	if (handle->pdev->revision >= 0x21) {
		/* Get hash algorithm */
		if (hfunc) {
			switch (rss_cfg->hash_algo) {
			case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
				*hfunc = ETH_RSS_HASH_TOP;
				break;
			case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
				*hfunc = ETH_RSS_HASH_XOR;
				break;
			default:
				*hfunc = ETH_RSS_HASH_UNKNOWN;
				break;
			}
		}

		/* Get the RSS Key required by the user */
		if (key)
			memcpy(key, rss_cfg->rss_hash_key,
			       HCLGEVF_RSS_KEY_SIZE);
	}

	if (indir)
		for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
			indir[i] = rss_cfg->rss_indirection_tbl[i];

	return 0;
}

static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret, i;

	if (handle->pdev->revision >= 0x21) {
		/* Set the RSS Hash Key if specified by the user */
		if (key) {
			switch (hfunc) {
			case ETH_RSS_HASH_TOP:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
				break;
			case ETH_RSS_HASH_XOR:
				rss_cfg->hash_algo =
					HCLGEVF_RSS_HASH_ALGO_SIMPLE;
				break;
			case ETH_RSS_HASH_NO_CHANGE:
				break;
			default:
				return -EINVAL;
			}

			ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
						       key);
			if (ret)
				return ret;

			/* Update the shadow copy of the RSS key */
			memcpy(rss_cfg->rss_hash_key, key,
			       HCLGEVF_RSS_KEY_SIZE);
		}
	}

	/* update the shadow RSS table with user specified qids */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = indir[i];

	/* update the hardware */
	return hclgevf_set_rss_indir_table(hdev);
}

static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
	u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;

	if (nfc->data & RXH_L4_B_2_3)
		hash_sets |= HCLGEVF_D_PORT_BIT;
	else
		hash_sets &= ~HCLGEVF_D_PORT_BIT;

	if (nfc->data & RXH_IP_SRC)
		hash_sets |= HCLGEVF_S_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_S_IP_BIT;

	if (nfc->data & RXH_IP_DST)
		hash_sets |= HCLGEVF_D_IP_BIT;
	else
		hash_sets &= ~HCLGEVF_D_IP_BIT;

	if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
		hash_sets |= HCLGEVF_V_TAG_BIT;

	return hash_sets;
}
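/* Example mapping (illustrative): "ethtool -N <if> rx-flow-hash tcp4 sdfn"
 * arrives with nfc->data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3, which the helper above translates into
 * HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT | HCLGEVF_S_PORT_BIT |
 * HCLGEVF_D_PORT_BIT for the firmware tuple field.
 */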
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	u8 tuple_sets;
	int ret;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	tuple_sets = hclgevf_get_rss_hash_bits(nfc);
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		req->ipv4_tcp_en = tuple_sets;
		break;
	case TCP_V6_FLOW:
		req->ipv6_tcp_en = tuple_sets;
		break;
	case UDP_V4_FLOW:
		req->ipv4_udp_en = tuple_sets;
		break;
	case UDP_V6_FLOW:
		req->ipv6_udp_en = tuple_sets;
		break;
	case SCTP_V4_FLOW:
		req->ipv4_sctp_en = tuple_sets;
		break;
	case SCTP_V6_FLOW:
		if ((nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;

		req->ipv6_sctp_en = tuple_sets;
		break;
	case IPV4_FLOW:
		req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	case IPV6_FLOW:
		req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		break;
	default:
		return -EINVAL;
	}

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Set rss tuple fail, status = %d\n", ret);
		return ret;
	}

	rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
	rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
	rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
	rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
	rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
	rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
	rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
	rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;

	return 0;
}

static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
				 struct ethtool_rxnfc *nfc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	u8 tuple_sets;

	if (handle->pdev->revision == 0x20)
		return -EOPNOTSUPP;

	nfc->data = 0;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
		break;
	case UDP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
		break;
	case TCP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
		break;
	case UDP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
		break;
	case SCTP_V4_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
		break;
	case SCTP_V6_FLOW:
		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
		break;
	default:
		return -EINVAL;
	}

	if (!tuple_sets)
		return 0;

	if (tuple_sets & HCLGEVF_D_PORT_BIT)
		nfc->data |= RXH_L4_B_2_3;
	if (tuple_sets & HCLGEVF_S_PORT_BIT)
		nfc->data |= RXH_L4_B_0_1;
	if (tuple_sets & HCLGEVF_D_IP_BIT)
		nfc->data |= RXH_IP_DST;
	if (tuple_sets & HCLGEVF_S_IP_BIT)
		nfc->data |= RXH_IP_SRC;

	return 0;
}

static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
				       struct hclgevf_rss_cfg *rss_cfg)
{
	struct hclgevf_rss_input_tuple_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);

	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;

	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
	req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
	req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
	req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
	req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
	req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
	req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Configure rss input fail, status = %d\n", ret);
	return ret;
}

static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;

	return rss_cfg->rss_size;
}

static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
				       int vector_id,
				       struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	struct hnae3_ring_chain_node *node;
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int i = 0;
	int status;
	u8 type;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	for (node = ring_chain; node; node = node->next) {
		int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
				 HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;

		if (i == 0) {
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			type = en ?
				HCLGE_MBX_MAP_RING_TO_VECTOR :
				HCLGE_MBX_UNMAP_RING_TO_VECTOR;
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}

		req->msg[idx_offset] =
			hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
		req->msg[idx_offset + 1] = node->tqp_index;
		req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
							   HNAE3_RING_GL_IDX_M,
							   HNAE3_RING_GL_IDX_S);

		i++;
		if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
		     HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
		     HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
		    !node->next) {
			req->msg[2] = i;

			status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
			if (status) {
				dev_err(&hdev->pdev->dev,
					"Map TQP fail, status is %d.\n",
					status);
				return status;
			}
			i = 0;
			hclgevf_cmd_setup_basic_desc(&desc,
						     HCLGEVF_OPC_MBX_VF_TO_PF,
						     false);
			req->msg[0] = type;
			req->msg[1] = vector_id;
		}
	}

	return 0;
}
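/* Payload sketch (illustrative; assumes HCLGE_MBX_RING_MAP_BASIC_MSG_NUM
 * == 3 and HCLGE_MBX_RING_NODE_VARIABLE_NUM == 3, as defined in
 * hclge_mbx.h): mapping two rings to vector 3 would fill
 *	msg[] = { HCLGE_MBX_MAP_RING_TO_VECTOR, 3, 2,
 *		  <type0>, <tqp0>, <gl0>, <type1>, <tqp1>, <gl1> }
 * i.e. a fixed 3-byte header (opcode, vector id, ring count) followed by
 * one 3-byte node per ring.
 */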
static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
				      struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
}

static int hclgevf_unmap_ring_from_vector(
				struct hnae3_handle *handle,
				int vector,
				struct hnae3_ring_chain_node *ring_chain)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret, vector_id;

	if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return 0;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"Get vector index fail. ret = %d\n", vector_id);
		return vector_id;
	}

	ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
	if (ret)
		dev_err(&handle->pdev->dev,
			"Unmap ring from vector fail. vector = %d, ret = %d\n",
			vector_id, ret);

	return ret;
}

static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int vector_id;

	vector_id = hclgevf_get_vector_index(hdev, vector);
	if (vector_id < 0) {
		dev_err(&handle->pdev->dev,
			"hclgevf_put_vector get vector index fail. ret = %d\n",
			vector_id);
		return vector_id;
	}

	hclgevf_free_vector(hdev, vector_id);

	return 0;
}

static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
					bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclge_mbx_vf_to_pf_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
	req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
	req->msg[1] = en_uc_pmc ? 1 : 0;
	req->msg[2] = en_mc_pmc ? 1 : 0;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Set promisc mode fail, status is %d.\n", status);

	return status;
}

static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
				    bool en_uc_pmc, bool en_mc_pmc)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
}

static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
			      int stream_id, bool enable)
{
	struct hclgevf_cfg_com_tqp_queue_cmd *req;
	struct hclgevf_desc desc;
	int status;

	req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
				     false);
	req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
	req->stream_id = cpu_to_le16(stream_id);
	req->enable |= enable << HCLGEVF_TQP_ENABLE_B;

	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"TQP enable fail, status = %d.\n", status);

	return status;
}

static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct hclgevf_tqp *tqp;
	int i;

	for (i = 0; i < kinfo->num_tqps; i++) {
		tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
		memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
	}
}

static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ether_addr_copy(p, hdev->hw.mac.mac_addr);
}

static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
				bool is_first)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
	u8 *new_mac_addr = (u8 *)p;
	u8 msg_data[ETH_ALEN * 2];
	u16 subcode;
	int status;

	ether_addr_copy(msg_data, new_mac_addr);
	ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);

	subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
			HCLGE_MBX_MAC_VLAN_UC_MODIFY;

	status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				      subcode, msg_data, ETH_ALEN * 2,
				      true, NULL, 0);
	if (!status)
		ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);

	return status;
}

static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
				    HCLGE_MBX_MAC_VLAN_UC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
			       const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_ADD,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
			      const unsigned char *addr)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
				    HCLGE_MBX_MAC_VLAN_MC_REMOVE,
				    addr, ETH_ALEN, false, NULL, 0);
}

static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
				   __be16 proto, u16 vlan_id,
				   bool is_kill)
{
#define HCLGEVF_VLAN_MBX_MSG_LEN 5
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];

	if (vlan_id > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	msg_data[0] = is_kill;
	memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
	memcpy(&msg_data[3], &proto, sizeof(proto));
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_FILTER, msg_data,
				    HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
}
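/* Payload sketch (illustrative): on a little-endian host, killing VLAN
 * 100 with proto 802.1Q produces
 *	msg_data = { 1, 0x64, 0x00, 0x81, 0x00 }
 * byte 0 is the is_kill flag, bytes 1-2 the CPU-endian vlan_id, and
 * bytes 3-4 the big-endian (network order) protocol 0x8100.
 */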
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = enable ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
				    HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
				    1, false, NULL, 0);
}

static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data[2];
	int ret;

	memcpy(&msg_data[0], &queue_id, sizeof(queue_id));

	/* disable vf queue before sending queue reset msg to PF */
	ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
	if (ret)
		return ret;

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
				    2, true, NULL, 0);
}

static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
				    sizeof(new_mtu), true, NULL, 0);
}

static int hclgevf_notify_client(struct hclgevf_dev *hdev,
				 enum hnae3_reset_notify_type type)
{
	struct hnae3_client *client = hdev->nic_client;
	struct hnae3_handle *handle = &hdev->nic;
	int ret;

	if (!client->ops->reset_notify)
		return -EOPNOTSUPP;

	ret = client->ops->reset_notify(handle, type);
	if (ret)
		dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
			type, ret);

	return ret;
}

static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
}

static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
				    unsigned long delay_us,
				    unsigned long wait_cnt)
{
	unsigned long cnt = 0;

	while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
	       cnt++ < wait_cnt)
		usleep_range(delay_us, delay_us * 2);

	if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
		dev_err(&hdev->pdev->dev, "flr wait timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US	20000
#define HCLGEVF_RESET_WAIT_CNT	2000
#define HCLGEVF_RESET_WAIT_TIMEOUT_US	\
	(HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)

	u32 val;
	int ret;

	/* wait to check the hardware reset completion status */
	val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
	dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);

	if (hdev->reset_type == HNAE3_FLR_RESET)
		return hclgevf_flr_poll_timeout(hdev,
						HCLGEVF_RESET_WAIT_US,
						HCLGEVF_RESET_WAIT_CNT);

	ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
				 !(val & HCLGEVF_RST_ING_BITS),
				 HCLGEVF_RESET_WAIT_US,
				 HCLGEVF_RESET_WAIT_TIMEOUT_US);

	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"couldn't get reset done status from h/w, timeout!\n");
		return ret;
	}

	/* we will wait a bit more to let the stack's reset complete. This
	 * might happen in case the reset assertion was made by the PF. Yes,
	 * this also means we might end up waiting a bit more even for a VF
	 * reset.
	 */
	msleep(5000);

	return 0;
}
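/* Timing note (illustrative): with the values above, the
 * readl_poll_timeout() loop in hclgevf_reset_wait() re-reads
 * HCLGEVF_RST_ING every 20 ms (20000 us) until the total budget of
 * 20000 us * 2000 = 40 seconds elapses, after which it returns -ETIMEDOUT.
 */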
static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
{
	int ret;

	/* uninitialize the nic client */
	ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
	if (ret)
		return ret;

	/* re-initialize the hclge device */
	ret = hclgevf_reset_hdev(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"hclge device re-init failed, VF is disabled!\n");
		return ret;
	}

	/* bring up the nic client again */
	ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT);
}

static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
	int ret = 0;

	switch (hdev->reset_type) {
	case HNAE3_VF_FUNC_RESET:
		ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
					   0, true, NULL, sizeof(u8));
		break;
	case HNAE3_FLR_RESET:
		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
		break;
	default:
		break;
	}

	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
		 hdev->reset_type, ret);

	return ret;
}

static int hclgevf_reset(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	int ret;

	/* Initialize ae_dev reset status as well, in case enet layer wants to
	 * know if device is undergoing reset
	 */
	ae_dev->reset_type = hdev->reset_type;
	hdev->reset_count++;
	rtnl_lock();

	/* bring down the nic to stop any ongoing TX/RX */
	ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	ret = hclgevf_reset_prepare_wait(hdev);
	if (ret)
		goto err_reset;

	/* check if VF could successfully fetch the hardware reset completion
	 * status from the hardware
	 */
	ret = hclgevf_reset_wait(hdev);
	if (ret) {
		/* can't do much in this situation, will disable VF */
		dev_err(&hdev->pdev->dev,
			"VF failed(=%d) to fetch H/W reset completion status\n",
			ret);
		goto err_reset;
	}

	rtnl_lock();

	/* now, re-initialize the nic client and ae device */
	ret = hclgevf_reset_stack(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
		goto err_reset_lock;
	}

	/* bring up the nic to enable TX/RX again */
	ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		goto err_reset_lock;

	rtnl_unlock();

	hdev->last_reset_time = jiffies;
	ae_dev->reset_type = HNAE3_NONE_RESET;

	return ret;
err_reset_lock:
	rtnl_unlock();
err_reset:
	/* When VF reset failed, only the higher level reset asserted by PF
	 * can restore it, so re-initialize the command queue to receive
	 * this higher reset event.
	 */
	hclgevf_cmd_init(hdev);
	dev_err(&hdev->pdev->dev, "failed to reset VF\n");

	return ret;
}

static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
						     unsigned long *addr)
{
	enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;

	/* return the highest priority reset level amongst all */
	if (test_bit(HNAE3_VF_RESET, addr)) {
		rst_level = HNAE3_VF_RESET;
		clear_bit(HNAE3_VF_RESET, addr);
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
		rst_level = HNAE3_VF_FULL_RESET;
		clear_bit(HNAE3_VF_FULL_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_PF_FUNC_RESET;
		clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
		rst_level = HNAE3_VF_FUNC_RESET;
		clear_bit(HNAE3_VF_FUNC_RESET, addr);
	} else if (test_bit(HNAE3_FLR_RESET, addr)) {
		rst_level = HNAE3_FLR_RESET;
		clear_bit(HNAE3_FLR_RESET, addr);
	}

	return rst_level;
}
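/* Example (illustrative): if both HNAE3_VF_FULL_RESET and
 * HNAE3_VF_FUNC_RESET are pending, the function above returns
 * HNAE3_VF_FULL_RESET and clears both bits, so the lower-priority
 * function reset is folded into the full reset instead of running twice.
 */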
static void hclgevf_reset_event(struct pci_dev *pdev,
				struct hnae3_handle *handle)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	struct hclgevf_dev *hdev = ae_dev->priv;

	dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");

	if (hdev->default_reset_request)
		hdev->reset_level =
			hclgevf_get_reset_level(hdev,
						&hdev->default_reset_request);
	else
		hdev->reset_level = HNAE3_VF_FUNC_RESET;

	/* reset of this VF requested */
	set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
	hclgevf_reset_task_schedule(hdev);

	hdev->last_reset_time = jiffies;
}

static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
					  enum hnae3_reset_type rst_type)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	set_bit(rst_type, &hdev->default_reset_request);
}

static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
{
#define HCLGEVF_FLR_WAIT_MS	100
#define HCLGEVF_FLR_WAIT_CNT	50
	struct hclgevf_dev *hdev = ae_dev->priv;
	int cnt = 0;

	clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
	clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
	set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
	hclgevf_reset_event(hdev->pdev, NULL);

	while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
	       cnt++ < HCLGEVF_FLR_WAIT_CNT)
		msleep(HCLGEVF_FLR_WAIT_MS);

	if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
		dev_err(&hdev->pdev->dev,
			"flr wait down timeout: %d\n", cnt);
}

static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->fw_version;
}

static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
{
	struct hclgevf_misc_vector *vector = &hdev->misc_vector;

	vector->vector_irq = pci_irq_vector(hdev->pdev,
					    HCLGEVF_MISC_VECTOR_NUM);
	vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
	/* vector status always valid for Vector 0 */
	hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
	hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;

	hdev->num_msi_left -= 1;
	hdev->num_msi_used += 1;
}

void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->rst_service_task);
	}
}

void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
	    !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
		set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
		schedule_work(&hdev->mbx_service_task);
	}
}

static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
{
	if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state) &&
	    !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
		schedule_work(&hdev->service_task);
}

static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
{
	/* if we have any pending mailbox event then schedule the mbx task */
	if (hdev->mbx_event_pending)
		hclgevf_mbx_task_schedule(hdev);

	if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
		hclgevf_reset_task_schedule(hdev);
}

static void hclgevf_service_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);

	mod_timer(&hdev->service_timer, jiffies + 5 * HZ);

	hclgevf_task_schedule(hdev);
}

static void hclgevf_reset_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev =
		container_of(work, struct hclgevf_dev, rst_service_task);
	int ret;

	if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);

	if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
			       &hdev->reset_state)) {
		/* PF has intimated that it is about to reset the hardware.
		 * We now have to poll & check if hardware has actually
		 * completed the reset sequence. On hardware reset completion,
		 * VF needs to reset the client and ae device.
		 */
		hdev->reset_attempts = 0;

		hdev->last_reset_time = jiffies;
		while ((hdev->reset_type =
			hclgevf_get_reset_level(hdev, &hdev->reset_pending))
		       != HNAE3_NONE_RESET) {
			ret = hclgevf_reset(hdev);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"VF stack reset failed %d.\n", ret);
		}
	} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
				      &hdev->reset_state)) {
		/* we could be here when either of below happens:
		 * 1. reset was initiated due to watchdog timeout caused by
		 *    a. IMP was earlier reset and our TX got choked down,
		 *       which resulted in watchdog reacting and inducing VF
		 *       reset. This also means our cmdq would be unreliable.
		 *    b. problem in TX due to other lower layer(example link
		 *       layer not functioning properly etc.)
		 * 2. VF reset might have been initiated due to some config
		 *    change.
		 *
		 * NOTE: There is no clear way to detect the above cases other
		 * than to react to the response of PF for this reset request.
		 * PF will ack the 1b and 2. cases but we will not get any
		 * intimation about 1a from PF as cmdq would be in unreliable
		 * state i.e. mailbox communication between PF and VF would be
		 * broken.
		 */

		/* if we are never getting into pending state it means either:
		 * 1. PF is not receiving our request which could be due to IMP
		 *    reset
		 * 2. PF is in a bad state
		 * We cannot do much for 2. but to check first we can try reset
		 * our PCIe + stack and see if it alleviates the problem.
		 */
		if (hdev->reset_attempts > 3) {
			/* prepare for full reset of stack + pcie interface */
			set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);

			/* "defer" schedule the reset task again */
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		} else {
			hdev->reset_attempts++;

			set_bit(hdev->reset_level, &hdev->reset_pending);
			set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		}
		hclgevf_reset_task_schedule(hdev);
	}

	clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}
static void hclgevf_mailbox_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, mbx_service_task);

	if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
		return;

	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);

	hclgevf_mbx_async_handler(hdev);

	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}

static void hclgevf_keep_alive_timer(struct timer_list *t)
{
	struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);

	schedule_work(&hdev->keep_alive_task);
	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
}

static void hclgevf_keep_alive_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;
	u8 respmsg;
	int ret;

	hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
	ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
				   0, false, &respmsg, sizeof(u8));
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF sends keep alive cmd failed(=%d)\n", ret);
}

static void hclgevf_service_task(struct work_struct *work)
{
	struct hclgevf_dev *hdev;

	hdev = container_of(work, struct hclgevf_dev, service_task);

	/* request the link status from the PF. PF would be able to tell VF
	 * about such updates in future so we might remove this later
	 */
	hclgevf_request_link_info(hdev);

	hclgevf_deferred_task_schedule(hdev);

	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}

static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
	hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}

static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
						      u32 *clearval)
{
	u32 cmdq_src_reg, rst_ing_reg;

	/* fetch the events from their corresponding regs */
	cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
					HCLGEVF_VECTOR0_CMDQ_SRC_REG);

	if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
		rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
		dev_info(&hdev->pdev->dev,
			 "receive reset interrupt 0x%x!\n", rst_ing_reg);
		set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
		set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_RST;
	}

	/* check for vector0 mailbox(=CMDQ RX) event source */
	if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
		cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
		*clearval = cmdq_src_reg;
		return HCLGEVF_VECTOR0_EVENT_MBX;
	}

	dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");

	return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
{
	writel(en ? 1 : 0, vector->addr);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
	enum hclgevf_evt_cause event_cause;
	struct hclgevf_dev *hdev = data;
	u32 clearval;

	hclgevf_enable_vector(&hdev->misc_vector, false);
	event_cause = hclgevf_check_evt_cause(hdev, &clearval);

	switch (event_cause) {
	case HCLGEVF_VECTOR0_EVENT_RST:
		hclgevf_reset_task_schedule(hdev);
		break;
	case HCLGEVF_VECTOR0_EVENT_MBX:
		hclgevf_mbx_handler(hdev);
		break;
	default:
		break;
	}

	if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
		hclgevf_clear_event_cause(hdev, clearval);
		hclgevf_enable_vector(&hdev->misc_vector, true);
	}

	return IRQ_HANDLED;
}

static int hclgevf_configure(struct hclgevf_dev *hdev)
{
	int ret;

	hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE;

	/* get queue configuration from PF */
	ret = hclgevf_get_queue_info(hdev);
	if (ret)
		return ret;
	/* get tc configuration from PF */
	return hclgevf_get_tc_info(hdev);
}

static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;

	hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->pdev = pdev;
	hdev->ae_dev = ae_dev;
	ae_dev->priv = hdev;

	return 0;
}

static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *roce = &hdev->roce;
	struct hnae3_handle *nic = &hdev->nic;

	roce->rinfo.num_vectors = hdev->num_roce_msix;

	if (hdev->num_msi_left < roce->rinfo.num_vectors ||
	    hdev->num_msi_left == 0)
		return -EINVAL;

	roce->rinfo.base_vector = hdev->roce_base_vector;

	roce->rinfo.netdev = nic->kinfo.netdev;
	roce->rinfo.roce_io_base = hdev->hw.io_base;

	roce->pdev = nic->pdev;
	roce->ae_algo = nic->ae_algo;
	roce->numa_node_mask = nic->numa_node_mask;

	return 0;
}

static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
	struct hclgevf_cfg_gro_status_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	if (!hnae3_dev_gro_supported(hdev))
		return 0;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
				     false);
	req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;

	req->gro_en = cpu_to_le16(en ? 1 : 0);

	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"VF GRO hardware config cmd failed, ret = %d.\n", ret);

	return ret;
}

static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
	int ret;
	u32 i;

	rss_cfg->rss_size = hdev->rss_size_max;

	if (hdev->pdev->revision >= 0x21) {
		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
		netdev_rss_key_fill(rss_cfg->rss_hash_key,
				    HCLGEVF_RSS_KEY_SIZE);

		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
					       rss_cfg->rss_hash_key);
		if (ret)
			return ret;

		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_udp_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;
		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
					HCLGEVF_RSS_INPUT_TUPLE_SCTP;
		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
					HCLGEVF_RSS_INPUT_TUPLE_OTHER;

		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
		if (ret)
			return ret;
	}

	/* Initialize RSS indirect table for each vport */
	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
		rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;

	ret = hclgevf_set_rss_indir_table(hdev);
	if (ret)
		return ret;

	return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
{
	/* other VLAN config (like VLAN TX/RX offload) will also be added
	 * here later
	 */
	return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
				       false);
}

static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (enable) {
		mod_timer(&hdev->service_timer, jiffies + HZ);
	} else {
		del_timer_sync(&hdev->service_timer);
		cancel_work_sync(&hdev->service_task);
		clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
	}
}

static int hclgevf_ae_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);

	hclgevf_request_link_info(hdev);

	clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	return 0;
}

static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i;

	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	for (i = 0; i < handle->kinfo.num_tqps; i++)
		hclgevf_reset_tqp(handle, i);

	/* reset tqp stats */
	hclgevf_reset_tqp_stats(handle);
	hclgevf_update_link_status(hdev, 0);
}

static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	u8 msg_data;

	msg_data = alive ? 1 : 0;
	return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
				    0, &msg_data, 1, false, NULL, 0);
}

static int hclgevf_client_start(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
	return hclgevf_set_alive(handle, true);
}

static void hclgevf_client_stop(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int ret;

	ret = hclgevf_set_alive(handle, false);
	if (ret)
		dev_warn(&hdev->pdev->dev,
			 "%s failed %d\n", __func__, ret);

	del_timer_sync(&hdev->keep_alive_timer);
	cancel_work_sync(&hdev->keep_alive_task);
}

static void hclgevf_state_init(struct hclgevf_dev *hdev)
{
	/* setup tasks for the MBX */
	INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
	clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
	clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);

	/* setup tasks for service timer */
	timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);

	INIT_WORK(&hdev->service_task, hclgevf_service_task);
	clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);

	INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);

	mutex_init(&hdev->mbx_resp.mbx_mutex);

	/* bring the device down */
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
}

static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
	set_bit(HCLGEVF_STATE_DOWN, &hdev->state);

	if (hdev->service_timer.function)
		del_timer_sync(&hdev->service_timer);
	if (hdev->service_task.func)
		cancel_work_sync(&hdev->service_task);
	if (hdev->mbx_service_task.func)
		cancel_work_sync(&hdev->mbx_service_task);
	if (hdev->rst_service_task.func)
		cancel_work_sync(&hdev->rst_service_task);

	mutex_destroy(&hdev->mbx_resp.mbx_mutex);
}

static int hclgevf_init_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int vectors;
	int i;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
		vectors = pci_alloc_irq_vectors(pdev,
						hdev->roce_base_msix_offset + 1,
						hdev->num_msi,
						PCI_IRQ_MSIX);
	else
		vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
						PCI_IRQ_MSI | PCI_IRQ_MSIX);

	if (vectors < 0) {
		dev_err(&pdev->dev,
			"failed(%d) to allocate MSI/MSI-X vectors\n",
			vectors);
		return vectors;
	}
	if (vectors < hdev->num_msi)
		dev_warn(&hdev->pdev->dev,
			 "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
			 hdev->num_msi, vectors);

	hdev->num_msi = vectors;
	hdev->num_msi_left = vectors;
	hdev->base_msi_vector = pdev->irq;
	hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;

	hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
					   sizeof(u16), GFP_KERNEL);
	if (!hdev->vector_status) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	for (i = 0; i < hdev->num_msi; i++)
		hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;

	hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
					sizeof(int), GFP_KERNEL);
	if (!hdev->vector_irq) {
		devm_kfree(&pdev->dev, hdev->vector_status);
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	devm_kfree(&pdev->dev, hdev->vector_status);
	devm_kfree(&pdev->dev, hdev->vector_irq);
	pci_free_irq_vectors(pdev);
}

static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
	int ret;

	hclgevf_get_misc_vector(hdev);

	ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
			  0, "hclgevf_cmd", hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
			hdev->misc_vector.vector_irq);
		return ret;
	}

	hclgevf_clear_event_cause(hdev, 0);

	/* enable misc. vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, true);

	return ret;
}

static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
{
	/* disable misc vector(vector 0) */
	hclgevf_enable_vector(&hdev->misc_vector, false);
	synchronize_irq(hdev->misc_vector.vector_irq);
	free_irq(hdev->misc_vector.vector_irq, hdev);
	hclgevf_free_vector(hdev, 0);
}
static int hclgevf_init_client_instance(struct hnae3_client *client,
					struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;
	int ret;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);

		if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
			struct hnae3_client *rc = hdev->roce_client;

			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;
			ret = rc->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;

			hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
						   1);
		}
		break;
	case HNAE3_CLIENT_UNIC:
		hdev->nic_client = client;
		hdev->nic.client = client;

		ret = client->ops->init_instance(&hdev->nic);
		if (ret)
			goto clear_nic;

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	case HNAE3_CLIENT_ROCE:
		if (hnae3_dev_roce_supported(hdev)) {
			hdev->roce_client = client;
			hdev->roce.client = client;
		}

		if (hdev->roce_client && hdev->nic_client) {
			ret = hclgevf_init_roce_base_info(hdev);
			if (ret)
				goto clear_roce;

			ret = client->ops->init_instance(&hdev->roce);
			if (ret)
				goto clear_roce;
		}

		hnae3_set_client_init_flag(client, ae_dev, 1);
		break;
	default:
		return -EINVAL;
	}

	return 0;

clear_nic:
	hdev->nic_client = NULL;
	hdev->nic.client = NULL;
	return ret;
clear_roce:
	hdev->roce_client = NULL;
	hdev->roce.client = NULL;
	return ret;
}

static void hclgevf_uninit_client_instance(struct hnae3_client *client,
					   struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	/* un-init roce, if it exists */
	if (hdev->roce_client) {
		hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
		hdev->roce_client = NULL;
		hdev->roce.client = NULL;
	}

	/* un-init nic/unic, if this was not called by roce client */
	if (client->ops->uninit_instance && hdev->nic_client &&
	    client->type != HNAE3_CLIENT_ROCE) {
		client->ops->uninit_instance(&hdev->nic, 0);
		hdev->nic_client = NULL;
		hdev->nic.client = NULL;
	}
}

static int hclgevf_pci_init(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclgevf_hw *hw;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PCI device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
		goto err_disable_device;
	}

	ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
		goto err_disable_device;
	}

	pci_set_master(pdev);
	hw = &hdev->hw;
	hw->hdev = hdev;
	hw->io_base = pci_iomap(pdev, 2, 0);
	if (!hw->io_base) {
		dev_err(&pdev->dev, "can't map configuration register space\n");
		ret = -ENOMEM;
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);

	return ret;
}

static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	pci_iounmap(pdev, hdev->hw.io_base);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
	struct hclgevf_query_res_cmd *req;
	struct hclgevf_desc desc;
	int ret;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query vf resource failed, ret = %d.\n", ret);
		return ret;
	}

	req = (struct hclgevf_query_res_cmd *)desc.data;

	if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
		hdev->roce_base_msix_offset =
		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
				HCLGEVF_MSIX_OFT_ROCEE_M,
				HCLGEVF_MSIX_OFT_ROCEE_S);
		hdev->num_roce_msix =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);

		/* VF should have NIC vectors and Roce vectors, NIC vectors
		 * are queued before Roce vectors. The offset is fixed to 64.
		 */
		hdev->num_msi = hdev->num_roce_msix +
				hdev->roce_base_msix_offset;
	} else {
		hdev->num_msi =
		hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
				HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
	}

	return 0;
}
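/* Example (illustrative, with a hypothetical vector count): on a
 * RoCE-capable VF with roce_base_msix_offset = 64 (the fixed offset noted
 * above) and num_roce_msix = 8, the VF ends up with num_msi = 64 + 8 = 72;
 * entries 0-63 serve the NIC (entry 0 being the misc vector) and entries
 * 64-71 serve RoCE.
 */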
2236 static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
2238 struct pci_dev *pdev = hdev->pdev;
2241 if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
2242 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2243 hclgevf_misc_irq_uninit(hdev);
2244 hclgevf_uninit_msi(hdev);
2245 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2248 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
2249 pci_set_master(pdev);
2250 ret = hclgevf_init_msi(hdev);
2253 "failed(%d) to init MSI/MSI-X\n", ret);
2257 ret = hclgevf_misc_irq_init(hdev);
2259 hclgevf_uninit_msi(hdev);
2260 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
2265 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
2271 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
2273 struct pci_dev *pdev = hdev->pdev;
2276 ret = hclgevf_pci_reset(hdev);
2278 dev_err(&pdev->dev, "pci reset failed %d\n", ret);
2282 ret = hclgevf_cmd_init(hdev);
2284 dev_err(&pdev->dev, "cmd failed %d\n", ret);
2288 ret = hclgevf_rss_init_hw(hdev);
2290 dev_err(&hdev->pdev->dev,
2291 "failed(%d) to initialize RSS\n", ret);
2295 ret = hclgevf_config_gro(hdev, true);
2299 ret = hclgevf_init_vlan_config(hdev);
2301 dev_err(&hdev->pdev->dev,
2302 "failed(%d) to initialize VLAN config\n", ret);
2306 dev_info(&hdev->pdev->dev, "Reset done\n");
static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int ret;

	ret = hclgevf_pci_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "PCI initialization failed\n");
		return ret;
	}

	ret = hclgevf_cmd_queue_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
		goto err_cmd_queue_init;
	}

	ret = hclgevf_cmd_init(hdev);
	if (ret)
		goto err_cmd_init;

	/* Get vf resource */
	ret = hclgevf_query_vf_resource(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"Query vf status error, ret = %d.\n", ret);
		goto err_cmd_init;
	}

	ret = hclgevf_init_msi(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
		goto err_cmd_init;
	}

	hclgevf_state_init(hdev);
	hdev->reset_level = HNAE3_VF_FUNC_RESET;

	ret = hclgevf_misc_irq_init(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
			ret);
		goto err_misc_irq_init;
	}

	set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);

	ret = hclgevf_configure(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
		goto err_config;
	}

	ret = hclgevf_alloc_tqps(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
		goto err_config;
	}

	ret = hclgevf_set_handle_info(hdev);
	if (ret) {
		dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
		goto err_config;
	}

	ret = hclgevf_config_gro(hdev, true);
	if (ret)
		goto err_config;

	/* Initialize RSS for this VF */
	ret = hclgevf_rss_init_hw(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize RSS\n", ret);
		goto err_config;
	}

	ret = hclgevf_init_vlan_config(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize VLAN config\n", ret);
		goto err_config;
	}

	hdev->last_reset_time = jiffies;
	pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);

	return 0;

err_config:
	hclgevf_misc_irq_uninit(hdev);
err_misc_irq_init:
	hclgevf_state_uninit(hdev);
	hclgevf_uninit_msi(hdev);
err_cmd_init:
	hclgevf_cmd_uninit(hdev);
err_cmd_queue_init:
	hclgevf_pci_uninit(hdev);
	clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
	return ret;
}
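
/* Teardown mirrors hclgevf_init_hdev() in reverse; the IRQ resources
 * are released only if they are still marked as initialized.
 */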
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
	hclgevf_state_uninit(hdev);

	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
		hclgevf_misc_irq_uninit(hdev);
		hclgevf_uninit_msi(hdev);
	}

	hclgevf_pci_uninit(hdev);
	hclgevf_cmd_uninit(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct pci_dev *pdev = ae_dev->pdev;
	struct hclgevf_dev *hdev;
	int ret;

	ret = hclgevf_alloc_hdev(ae_dev);
	if (ret) {
		dev_err(&pdev->dev, "hclge device allocation failed\n");
		return ret;
	}

	ret = hclgevf_init_hdev(ae_dev->priv);
	if (ret) {
		dev_err(&pdev->dev, "hclge device initialization failed\n");
		return ret;
	}

	hdev = ae_dev->priv;
	timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
	INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);

	return 0;
}
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	struct hclgevf_dev *hdev = ae_dev->priv;

	hclgevf_uninit_hdev(hdev);
	ae_dev->priv = NULL;
}
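
/* The channel ceiling is bounded both by the RSS capability per TC and
 * by the number of TQPs actually allocated to this VF.
 */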
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
	struct hnae3_handle *nic = &hdev->nic;
	struct hnae3_knic_private_info *kinfo = &nic->kinfo;

	return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
}
/**
 * hclgevf_get_channels - Get the current channels enabled and max supported.
 * @handle: hardware information for network interface
 * @ch: ethtool channels structure
 *
 * We don't support separate tx and rx queues as channels. The other count
 * represents how many queues are being used for control. max_combined counts
 * how many queue pairs we can support. They may not be mapped 1 to 1 with
 * q_vectors since we support a lot more queue pairs than q_vectors.
 **/
static void hclgevf_get_channels(struct hnae3_handle *handle,
				 struct ethtool_channels *ch)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	ch->max_combined = hclgevf_get_max_channels(hdev);
	ch->other_count = 0;
	ch->max_other = 0;
	ch->combined_count = hdev->num_tqps;
}
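
/* These values are surfaced to userspace via "ethtool -l <dev>":
 * max_combined is the ceiling, combined_count the current setting.
 */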
static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
					  u16 *alloc_tqps, u16 *max_rss_size)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	*alloc_tqps = hdev->num_tqps;
	*max_rss_size = hdev->rss_size_max;
}
static int hclgevf_get_status(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->hw.mac.link;
}
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
					    u8 *auto_neg, u32 *speed,
					    u8 *duplex)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (speed)
		*speed = hdev->hw.mac.speed;
	if (duplex)
		*duplex = hdev->hw.mac.duplex;
	if (auto_neg)
		*auto_neg = AUTONEG_DISABLE;
}
void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
				 u8 duplex)
{
	hdev->hw.mac.speed = speed;
	hdev->hw.mac.duplex = duplex;
}
static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hclgevf_config_gro(hdev, enable);
}
static void hclgevf_get_media_type(struct hnae3_handle *handle,
				   u8 *media_type)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	if (media_type)
		*media_type = hdev->hw.mac.media_type;
}
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
}
static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	return hdev->reset_count;
}
#define MAX_SEPARATE_NUM	4
#define SEPARATOR_VALUE		0xFFFFFFFF
#define REG_NUM_PER_LINE	4
#define REG_LEN_PER_LINE	(REG_NUM_PER_LINE * sizeof(u32))
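
/* The register dump is laid out in 4-word (16-byte) lines; each group
 * of registers is padded with 0xFFFFFFFF separator words up to a line
 * boundary. For example, the 14 command-queue registers leave
 * 14 % 4 = 2 slack slots, so two separators pad the group to four
 * full lines - hence the "+ 1" line per group below.
 */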
static int hclgevf_get_regs_len(struct hnae3_handle *handle)
{
	int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);

	cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
	common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
	ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
	tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;

	return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
		tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
}
static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
			     void *data)
{
	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
	int i, j, reg_um, separator_num;
	u32 *reg = data;

	*version = hdev->fw_version;

	/* fetching per-VF registers values from VF PCIe register space */
	reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (i = 0; i < reg_um; i++)
		*reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
	for (i = 0; i < separator_num; i++)
		*reg++ = SEPARATOR_VALUE;

	reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_tqps; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  ring_reg_addr_list[i] +
						  0x200 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}

	reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
	separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
	for (j = 0; j < hdev->num_msi_used - 1; j++) {
		for (i = 0; i < reg_um; i++)
			*reg++ = hclgevf_read_dev(&hdev->hw,
						  tqp_intr_reg_addr_list[i] +
						  4 * j);
		for (i = 0; i < separator_num; i++)
			*reg++ = SEPARATOR_VALUE;
	}
}
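
/* Operation table through which the hnae3 framework drives this VF;
 * the generic hns3 netdev layer calls these hooks indirectly.
 */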
static const struct hnae3_ae_ops hclgevf_ops = {
	.init_ae_dev = hclgevf_init_ae_dev,
	.uninit_ae_dev = hclgevf_uninit_ae_dev,
	.flr_prepare = hclgevf_flr_prepare,
	.flr_done = hclgevf_flr_done,
	.init_client_instance = hclgevf_init_client_instance,
	.uninit_client_instance = hclgevf_uninit_client_instance,
	.start = hclgevf_ae_start,
	.stop = hclgevf_ae_stop,
	.client_start = hclgevf_client_start,
	.client_stop = hclgevf_client_stop,
	.map_ring_to_vector = hclgevf_map_ring_to_vector,
	.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
	.get_vector = hclgevf_get_vector,
	.put_vector = hclgevf_put_vector,
	.reset_queue = hclgevf_reset_tqp,
	.set_promisc_mode = hclgevf_set_promisc_mode,
	.get_mac_addr = hclgevf_get_mac_addr,
	.set_mac_addr = hclgevf_set_mac_addr,
	.add_uc_addr = hclgevf_add_uc_addr,
	.rm_uc_addr = hclgevf_rm_uc_addr,
	.add_mc_addr = hclgevf_add_mc_addr,
	.rm_mc_addr = hclgevf_rm_mc_addr,
	.get_stats = hclgevf_get_stats,
	.update_stats = hclgevf_update_stats,
	.get_strings = hclgevf_get_strings,
	.get_sset_count = hclgevf_get_sset_count,
	.get_rss_key_size = hclgevf_get_rss_key_size,
	.get_rss_indir_size = hclgevf_get_rss_indir_size,
	.get_rss = hclgevf_get_rss,
	.set_rss = hclgevf_set_rss,
	.get_rss_tuple = hclgevf_get_rss_tuple,
	.set_rss_tuple = hclgevf_set_rss_tuple,
	.get_tc_size = hclgevf_get_tc_size,
	.get_fw_version = hclgevf_get_fw_version,
	.set_vlan_filter = hclgevf_set_vlan_filter,
	.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
	.reset_event = hclgevf_reset_event,
	.set_default_reset_request = hclgevf_set_def_reset_request,
	.get_channels = hclgevf_get_channels,
	.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
	.get_regs_len = hclgevf_get_regs_len,
	.get_regs = hclgevf_get_regs,
	.get_status = hclgevf_get_status,
	.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
	.get_media_type = hclgevf_get_media_type,
	.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
	.ae_dev_resetting = hclgevf_ae_dev_resetting,
	.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
	.set_gro_en = hclgevf_gro_en,
	.set_mtu = hclgevf_set_mtu,
	.get_global_queue_id = hclgevf_get_qid_global,
	.set_timer_task = hclgevf_set_timer_task,
};
static struct hnae3_ae_algo ae_algovf = {
	.ops = &hclgevf_ops,
	.pdev_id_table = ae_algovf_pci_tbl,
};
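
/* Module entry points: registration simply makes the algo (and its
 * PCI ID table) known to the hnae3 framework, which matches devices
 * and then calls back into init_ae_dev().
 */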
static int hclgevf_init(void)
{
	pr_info("%s is initializing\n", HCLGEVF_NAME);

	hnae3_register_ae_algo(&ae_algovf);

	return 0;
}

static void hclgevf_exit(void)
{
	hnae3_unregister_ae_algo(&ae_algovf);
}
module_init(hclgevf_init);
module_exit(hclgevf_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_DESCRIPTION("HCLGEVF Driver");
MODULE_VERSION(HCLGEVF_MOD_VERSION);