// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: max tm rate available for configuration
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
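/* Worked example (illustrative, not from the original source): at port level
 * the tick is 6 * 8 = 48, so the default ir_b = 126 with ir_u = ir_s = 0
 * gives ir_calc = 126 * 8 * 1000 / 48 = 21000 Mbps. When the requested rate
 * differs, ir_s (denominator) or ir_u (numerator) scales this by powers of
 * two and ir_b is then re-fitted, as done below.
 */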
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}

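/* Note: the PFC counters for all TCs arrive split across
 * HCLGE_TM_PFC_PKT_GET_CMD_NUM chained descriptors, each carrying
 * HCLGE_TM_PFC_NUM_GET_PER_CMD 64-bit per-TC counters.
 */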
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

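/* Update only the MAC address used in pause frames: read the current pause
 * parameters back from firmware, then rewrite them unchanged together with
 * the new address.
 */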
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes; the first byte holds
	 * priority 0 and priority 1: the higher 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

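/* The whole user-priority -> TC table fits in a single command descriptor:
 * hclge_fill_pri_array() packs two 4-bit TC ids per byte of desc.data.
 */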
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

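/* Pack the rate fields (IR_B/IR_U/IR_S) and the two bucket sizes into the
 * single 32-bit shaper-parameter word expected by the firmware.
 */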
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

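/* For the *_schd_mode_cfg helpers below, desc.data[1] selects the arbitration
 * mode: setting the DWRR mask bit enables DWRR, leaving it zero selects
 * strict priority (SP).
 */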
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

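/* Apply @max_tx_rate (in Mbps) to every qset of @vport; a value of 0 means
 * "unlimited" and falls back to the device's max_tm_rate.
 */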
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	/* when mqprio is enabled, the tc_info has already been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

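/* Each PG shaper has two token buckets: the C (committed) bucket is written
 * with zeroed IR fields, so the rate limit is effectively enforced by the
 * P (peak) bucket, which carries the calculated parameters.
 */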
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg pg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->tc_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* pg-pri */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

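/* Top-level TM setup: program the mappings first, then the shapers, then the
 * DWRR weights, and finally the per-level scheduling modes.
 */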
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* For the queues used for backpressure, divide them into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	int i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC; when the driver is initializing and
	 * the MAC is in GE mode, ignore the error here, otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -EINVAL;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
		return 0;

	return hclge_tm_bp_setup(hdev);
}

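/* The helpers below are read-back (query) counterparts of the configuration
 * commands above; they are used, e.g., by the debugfs interface.
 */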
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}

int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}

int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
	 *             \         \   /         /
	 *              \         \ /         /
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}

int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}

int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}

int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}

int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}

int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}

int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}