1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/etherdevice.h>
7 #include "hclge_main.h"
10 enum hclge_shaper_level {
11 HCLGE_SHAPER_LVL_PRI = 0,
12 HCLGE_SHAPER_LVL_PG = 1,
13 HCLGE_SHAPER_LVL_PORT = 2,
14 HCLGE_SHAPER_LVL_QSET = 3,
15 HCLGE_SHAPER_LVL_CNT = 4,
16 HCLGE_SHAPER_LVL_VF = 0,
17 HCLGE_SHAPER_LVL_PF = 1,
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD 3
23 #define HCLGE_SHAPER_BS_U_DEF 5
24 #define HCLGE_SHAPER_BS_S_DEF 20
26 /* hclge_shaper_para_calc: calculate the ir parameter for the shaper
27 * @ir: rate to be configured, in Mbps
28 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
29 * @ir_para: parameters of the IR shaper
30 * @max_tm_rate: max tm rate available to configure
31 *
32 * the formula:
33 *
34 *            IR_b * (2 ^ IR_u) * 8
35 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
36 *            Tick * (2 ^ IR_s)
37 *
38 * @return: 0: calculation successful, negative: fail
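* Worked example (illustrative, not from the original comment): at the
* port level Tick = 6 * 8 = 48, so the default IR_b = 126 with
* IR_u = IR_s = 0 gives IR = 126 * 1 * 8 / 48 * 1000 = 21000 Mbps,
* matching the default ir_calc of DEFAULT_DIVISOR_IR_B / tick below.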
40 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
41 struct hclge_shaper_ir_para *ir_para,
44 #define DEFAULT_SHAPER_IR_B 126
45 #define DIVISOR_CLK (1000 * 8)
46 #define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
48 static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
49 6 * 256, /* Priority level */
50 6 * 32, /* Priority group level */
51 6 * 8, /* Port level */
52 6 * 256 /* Qset level */
60 if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
64 tick = tick_array[shaper_level];
67 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0,
68 * the formula is changed to:
69 *           126 * 1
70 * ir_calc = ---------------- * 1000
71 *           tick * 1
73 ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
76 ir_para->ir_b = DEFAULT_SHAPER_IR_B;
81 } else if (ir_calc > ir) {
82 /* Increasing the denominator to select ir_s value */
83 while (ir_calc >= ir && ir) {
85 ir_calc = DEFAULT_DIVISOR_IR_B /
86 (tick * (1 << ir_s_calc));
89 ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
90 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
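/* Illustrative example (not in the original source): for ir = 300 Mbps
 * at the port level (tick = 48), the loop exits with ir_s_calc = 7
 * (ir_calc = 1008000 / 6144 = 164), then
 * ir_b = (300 * 48 * 128 + 4000) / 8000 = 230, so the shaper yields
 * 230 * 8 / (48 * 2^7) * 1000 ~= 299.5 Mbps.
 */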
92 /* Increasing the numerator to select ir_u value */
95 while (ir_calc < ir) {
97 numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
98 ir_calc = (numerator + (tick >> 1)) / tick;
102 ir_para->ir_b = DEFAULT_SHAPER_IR_B;
104 u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
105 ir_para->ir_b = (ir * tick + (denominator >> 1)) /
110 ir_para->ir_u = ir_u_calc;
111 ir_para->ir_s = ir_s_calc;
116 static const u16 hclge_pfc_tx_stats_offset[] = {
117 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
118 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
119 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
120 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
121 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
122 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
123 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
124 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
127 static const u16 hclge_pfc_rx_stats_offset[] = {
128 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
129 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
130 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
131 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
132 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
133 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
134 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
135 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
138 static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
144 offset = hclge_pfc_tx_stats_offset;
146 offset = hclge_pfc_rx_stats_offset;
148 for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
149 stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
152 void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
154 hclge_pfc_stats_get(hdev, false, stats);
157 void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
159 hclge_pfc_stats_get(hdev, true, stats);
162 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
164 struct hclge_desc desc;
166 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
168 desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
169 (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
171 return hclge_cmd_send(&hdev->hw, &desc, 1);
174 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
177 struct hclge_desc desc;
178 struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
180 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
182 pfc->tx_rx_en_bitmap = tx_rx_bitmap;
183 pfc->pri_en_bitmap = pfc_bitmap;
185 return hclge_cmd_send(&hdev->hw, &desc, 1);
188 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
189 u8 pause_trans_gap, u16 pause_trans_time)
191 struct hclge_cfg_pause_param_cmd *pause_param;
192 struct hclge_desc desc;
194 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
196 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
198 ether_addr_copy(pause_param->mac_addr, addr);
199 ether_addr_copy(pause_param->mac_addr_extra, addr);
200 pause_param->pause_trans_gap = pause_trans_gap;
201 pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
203 return hclge_cmd_send(&hdev->hw, &desc, 1);
206 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
208 struct hclge_cfg_pause_param_cmd *pause_param;
209 struct hclge_desc desc;
214 pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
216 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
218 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
222 trans_gap = pause_param->pause_trans_gap;
223 trans_time = le16_to_cpu(pause_param->pause_trans_time);
225 return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
228 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
232 tc = hdev->tm_info.prio_tc[pri_id];
234 if (tc >= hdev->tm_info.num_tc)
238 * the register for priority has four bytes; the first byte includes
239 * priority 0 and priority 1, where the upper 4 bits stand for priority 1
240 * and the lower 4 bits stand for priority 0, as below:
241 * first byte: | pri_1 | pri_0 |
242 * second byte: | pri_3 | pri_2 |
243 * third byte: | pri_5 | pri_4 |
244 * fourth byte: | pri_7 | pri_6 |
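* e.g. (illustrative) mapping priority 3 to tc 2 sets the upper nibble
* of the second byte: pri[3 >> 1] |= 2 << ((3 & 1) * 4), i.e. pri[1] |= 0x20.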
246 pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
251 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
253 struct hclge_desc desc;
254 u8 *pri = (u8 *)desc.data;
258 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
260 for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
261 ret = hclge_fill_pri_array(hdev, pri, pri_id);
266 return hclge_cmd_send(&hdev->hw, &desc, 1);
269 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
270 u8 pg_id, u8 pri_bit_map)
272 struct hclge_pg_to_pri_link_cmd *map;
273 struct hclge_desc desc;
275 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
277 map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
280 map->pri_bit_map = pri_bit_map;
282 return hclge_cmd_send(&hdev->hw, &desc, 1);
285 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
288 struct hclge_qs_to_pri_link_cmd *map;
289 struct hclge_desc desc;
291 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
293 map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
295 map->qs_id = cpu_to_le16(qs_id);
297 map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
299 return hclge_cmd_send(&hdev->hw, &desc, 1);
302 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
305 struct hclge_nq_to_qs_link_cmd *map;
306 struct hclge_desc desc;
310 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
312 map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
314 map->nq_id = cpu_to_le16(q_id);
316 /* convert qs_id to the following format to support qset_id >= 1024
317 * qs_id:   | 15 | 14 ~ 10 |  9 ~ 0  |
318 *             /         /  \        \
319 *            /         /    \        \
320 * qset_id: | 15 ~ 11 |  10  |  9 ~ 0 |
321 *          | qs_id_h |  vld | qs_id_l |
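* e.g. (illustrative) qs_id 1025 (0x401): qs_id_l = 0x001, qs_id_h = 0x1,
* so the value written is (0x1 << 11) | 0x001 = 0x801, plus the vld
* bit (bit 10), i.e. 0xC01.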
323 qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
325 qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
327 hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
329 hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
331 map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
333 return hclge_cmd_send(&hdev->hw, &desc, 1);
336 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
339 struct hclge_pg_weight_cmd *weight;
340 struct hclge_desc desc;
342 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
344 weight = (struct hclge_pg_weight_cmd *)desc.data;
346 weight->pg_id = pg_id;
349 return hclge_cmd_send(&hdev->hw, &desc, 1);
352 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
355 struct hclge_priority_weight_cmd *weight;
356 struct hclge_desc desc;
358 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
360 weight = (struct hclge_priority_weight_cmd *)desc.data;
362 weight->pri_id = pri_id;
365 return hclge_cmd_send(&hdev->hw, &desc, 1);
368 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
371 struct hclge_qs_weight_cmd *weight;
372 struct hclge_desc desc;
374 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
376 weight = (struct hclge_qs_weight_cmd *)desc.data;
378 weight->qs_id = cpu_to_le16(qs_id);
381 return hclge_cmd_send(&hdev->hw, &desc, 1);
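/* Pack the rate fields (IR_B, IR_U, IR_S) and bucket sizes (BS_B, BS_S)
 * into one 32-bit shaping parameter; the bit positions come from the
 * HCLGE_TM_SHAP_*_MSK/LSH definitions that hclge_tm_set_field() expands
 * to (see hclge_tm.h).
 */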
384 static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
387 u32 shapping_para = 0;
389 hclge_tm_set_field(shapping_para, IR_B, ir_b);
390 hclge_tm_set_field(shapping_para, IR_U, ir_u);
391 hclge_tm_set_field(shapping_para, IR_S, ir_s);
392 hclge_tm_set_field(shapping_para, BS_B, bs_b);
393 hclge_tm_set_field(shapping_para, BS_S, bs_s);
395 return shapping_para;
398 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
399 enum hclge_shap_bucket bucket, u8 pg_id,
400 u32 shapping_para, u32 rate)
402 struct hclge_pg_shapping_cmd *shap_cfg_cmd;
403 enum hclge_opcode_type opcode;
404 struct hclge_desc desc;
406 opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
407 HCLGE_OPC_TM_PG_C_SHAPPING;
408 hclge_cmd_setup_basic_desc(&desc, opcode, false);
410 shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
412 shap_cfg_cmd->pg_id = pg_id;
414 shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
416 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
418 shap_cfg_cmd->pg_rate = cpu_to_le32(rate);
420 return hclge_cmd_send(&hdev->hw, &desc, 1);
423 int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
425 struct hclge_port_shapping_cmd *shap_cfg_cmd;
426 struct hclge_shaper_ir_para ir_para;
427 struct hclge_desc desc;
431 ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
433 hdev->ae_dev->dev_specs.max_tm_rate);
437 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
438 shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
440 shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
442 HCLGE_SHAPER_BS_U_DEF,
443 HCLGE_SHAPER_BS_S_DEF);
445 shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
447 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
449 shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);
451 return hclge_cmd_send(&hdev->hw, &desc, 1);
454 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
455 enum hclge_shap_bucket bucket, u8 pri_id,
456 u32 shapping_para, u32 rate)
458 struct hclge_pri_shapping_cmd *shap_cfg_cmd;
459 enum hclge_opcode_type opcode;
460 struct hclge_desc desc;
462 opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
463 HCLGE_OPC_TM_PRI_C_SHAPPING;
465 hclge_cmd_setup_basic_desc(&desc, opcode, false);
467 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
469 shap_cfg_cmd->pri_id = pri_id;
471 shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
473 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
475 shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
477 return hclge_cmd_send(&hdev->hw, &desc, 1);
480 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
482 struct hclge_desc desc;
484 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
486 if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
487 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
491 desc.data[0] = cpu_to_le32(pg_id);
493 return hclge_cmd_send(&hdev->hw, &desc, 1);
496 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
498 struct hclge_desc desc;
500 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
502 if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
503 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
507 desc.data[0] = cpu_to_le32(pri_id);
509 return hclge_cmd_send(&hdev->hw, &desc, 1);
512 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
514 struct hclge_desc desc;
516 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
518 if (mode == HCLGE_SCH_MODE_DWRR)
519 desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
523 desc.data[0] = cpu_to_le32(qs_id);
525 return hclge_cmd_send(&hdev->hw, &desc, 1);
528 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
531 struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
532 struct hclge_desc desc;
534 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
537 bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
539 bp_to_qs_map_cmd->tc_id = tc;
540 bp_to_qs_map_cmd->qs_group_id = grp_id;
541 bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
543 return hclge_cmd_send(&hdev->hw, &desc, 1);
546 int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
548 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
549 struct hclge_qs_shapping_cmd *shap_cfg_cmd;
550 struct hclge_shaper_ir_para ir_para;
551 struct hclge_dev *hdev = vport->back;
552 struct hclge_desc desc;
557 max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
559 ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
561 hdev->ae_dev->dev_specs.max_tm_rate);
565 shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
567 HCLGE_SHAPER_BS_U_DEF,
568 HCLGE_SHAPER_BS_S_DEF);
570 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
571 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
574 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
575 shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
576 shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
578 hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
579 shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);
581 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
583 dev_err(&hdev->pdev->dev,
584 "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
585 vport->vport_id, shap_cfg_cmd->qs_id,
594 static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
596 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
597 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
598 struct hclge_dev *hdev = vport->back;
599 u16 max_rss_size = 0;
602 if (!tc_info->mqprio_active)
603 return vport->alloc_tqps / tc_info->num_tc;
605 for (i = 0; i < HNAE3_MAX_TC; i++) {
606 if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
608 if (max_rss_size < tc_info->tqp_count[i])
609 max_rss_size = tc_info->tqp_count[i];
615 static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
617 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
618 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
619 struct hclge_dev *hdev = vport->back;
623 if (!tc_info->mqprio_active)
624 return kinfo->rss_size * tc_info->num_tc;
626 for (i = 0; i < HNAE3_MAX_TC; i++) {
627 if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
628 sum += tc_info->tqp_count[i];
634 static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
636 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
637 struct hclge_dev *hdev = vport->back;
638 u16 vport_max_rss_size;
641 /* TC configuration is shared by the PF and VFs on one port; only allow
642 * one TC per VF for simplicity. A VF's vport_id is non-zero.
644 if (vport->vport_id) {
645 kinfo->tc_info.max_tc = 1;
646 kinfo->tc_info.num_tc = 1;
647 vport->qs_offset = HNAE3_MAX_TC +
648 vport->vport_id - HCLGE_VF_VPORT_START_NUM;
649 vport_max_rss_size = hdev->vf_rss_size_max;
651 kinfo->tc_info.max_tc = hdev->tc_max;
652 kinfo->tc_info.num_tc =
653 min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
654 vport->qs_offset = 0;
655 vport_max_rss_size = hdev->pf_rss_size_max;
658 max_rss_size = min_t(u16, vport_max_rss_size,
659 hclge_vport_get_max_rss_size(vport));
661 /* Set to user value, no larger than max_rss_size. */
662 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
663 kinfo->req_rss_size <= max_rss_size) {
664 dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
665 kinfo->rss_size, kinfo->req_rss_size);
666 kinfo->rss_size = kinfo->req_rss_size;
667 } else if (kinfo->rss_size > max_rss_size ||
668 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
669 /* Set to the maximum specification value (max_rss_size). */
670 kinfo->rss_size = max_rss_size;
674 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
676 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
677 struct hclge_dev *hdev = vport->back;
680 hclge_tm_update_kinfo_rss_size(vport);
681 kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
682 vport->dwrr = 100; /* 100 percent as init */
683 vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
685 if (vport->vport_id == PF_VPORT_ID)
686 hdev->rss_cfg.rss_size = kinfo->rss_size;
688 /* when mqprio is enabled, tc_info has already been updated. */
689 if (kinfo->tc_info.mqprio_active)
692 for (i = 0; i < HNAE3_MAX_TC; i++) {
693 if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
694 kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
695 kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
697 /* Set to the default queue if the TC is disabled */
698 kinfo->tc_info.tqp_offset[i] = 0;
699 kinfo->tc_info.tqp_count[i] = 1;
703 memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
704 sizeof_field(struct hnae3_tc_info, prio_tc));
707 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
709 struct hclge_vport *vport = hdev->vport;
712 for (i = 0; i < hdev->num_alloc_vport; i++) {
713 hclge_tm_vport_tc_info_update(vport);
719 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
724 for (i = 0; i < hdev->tc_max; i++) {
725 if (i < hdev->tm_info.num_tc) {
726 tc_sch_mode = HCLGE_SCH_MODE_DWRR;
727 bw_limit = hdev->tm_info.pg_info[0].bw_limit;
729 tc_sch_mode = HCLGE_SCH_MODE_SP;
733 hdev->tm_info.tc_info[i].tc_id = i;
734 hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
735 hdev->tm_info.tc_info[i].pgid = 0;
736 hdev->tm_info.tc_info[i].bw_limit = bw_limit;
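/* e.g. (illustrative) with num_tc = 4, priorities 0..3 map to TC 0..3
 * below, and priorities 4..7 fall back to TC 0.
 */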
739 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
740 hdev->tm_info.prio_tc[i] =
741 (i >= hdev->tm_info.num_tc) ? 0 : i;
744 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
746 #define BW_PERCENT 100
750 for (i = 0; i < hdev->tm_info.num_pg; i++) {
753 hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
755 hdev->tm_info.pg_info[i].pg_id = i;
756 hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
758 hdev->tm_info.pg_info[i].bw_limit =
759 hdev->ae_dev->dev_specs.max_tm_rate;
764 hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
765 for (k = 0; k < hdev->tm_info.num_tc; k++)
766 hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
767 for (; k < HNAE3_MAX_TC; k++)
768 hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
772 static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
774 if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
775 if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
776 dev_warn(&hdev->pdev->dev,
777 "Only 1 tc used, but last mode is FC_PFC\n");
779 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
780 } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
781 /* fc_mode_last_time record the last fc_mode when
782 * DCB is enabled, so that fc_mode can be set to
783 * the correct value when DCB is disabled.
785 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
786 hdev->tm_info.fc_mode = HCLGE_FC_PFC;
790 static void hclge_update_fc_mode(struct hclge_dev *hdev)
792 if (!hdev->tm_info.pfc_en) {
793 hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
797 if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
798 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
799 hdev->tm_info.fc_mode = HCLGE_FC_PFC;
803 void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
805 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
806 hclge_update_fc_mode(hdev);
808 hclge_update_fc_mode_by_dcb_flag(hdev);
811 static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
813 hclge_tm_pg_info_init(hdev);
815 hclge_tm_tc_info_init(hdev);
817 hclge_tm_vport_info_update(hdev);
819 hclge_tm_pfc_info_update(hdev);
822 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
827 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
830 for (i = 0; i < hdev->tm_info.num_pg; i++) {
832 ret = hclge_tm_pg_to_pri_map_cfg(
833 hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
841 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
843 u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
844 struct hclge_shaper_ir_para ir_para;
850 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
854 for (i = 0; i < hdev->tm_info.num_pg; i++) {
855 u32 rate = hdev->tm_info.pg_info[i].bw_limit;
857 /* Calc shaper para */
858 ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
859 &ir_para, max_tm_rate);
863 shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
864 HCLGE_SHAPER_BS_U_DEF,
865 HCLGE_SHAPER_BS_S_DEF);
866 ret = hclge_tm_pg_shapping_cfg(hdev,
867 HCLGE_TM_SHAP_C_BUCKET, i,
872 shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
875 HCLGE_SHAPER_BS_U_DEF,
876 HCLGE_SHAPER_BS_S_DEF);
877 ret = hclge_tm_pg_shapping_cfg(hdev,
878 HCLGE_TM_SHAP_P_BUCKET, i,
887 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
893 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
897 for (i = 0; i < hdev->tm_info.num_pg; i++) {
899 ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
907 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
908 struct hclge_vport *vport)
910 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
911 struct hnae3_tc_info *tc_info = &kinfo->tc_info;
912 struct hnae3_queue **tqp = kinfo->tqp;
916 for (i = 0; i < tc_info->num_tc; i++) {
917 for (j = 0; j < tc_info->tqp_count[i]; j++) {
918 struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
920 ret = hclge_tm_q_to_qs_map_cfg(hdev,
921 hclge_get_queue_id(q),
922 vport->qs_offset + i);
931 static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
933 struct hclge_vport *vport = hdev->vport;
937 /* Cfg qs -> pri mapping, one by one mapping */
938 for (k = 0; k < hdev->num_alloc_vport; k++) {
939 struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
941 for (i = 0; i < kinfo->tc_info.max_tc; i++) {
942 u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
943 bool link_vld = i < kinfo->tc_info.num_tc;
945 ret = hclge_tm_qs_to_pri_map_cfg(hdev,
946 vport[k].qs_offset + i,
956 static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
958 struct hclge_vport *vport = hdev->vport;
962 /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
963 for (k = 0; k < hdev->num_alloc_vport; k++)
964 for (i = 0; i < HNAE3_MAX_TC; i++) {
965 ret = hclge_tm_qs_to_pri_map_cfg(hdev,
966 vport[k].qs_offset + i,
975 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
977 struct hclge_vport *vport = hdev->vport;
981 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
982 ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
983 else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
984 ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
991 /* Cfg q -> qs mapping */
992 for (i = 0; i < hdev->num_alloc_vport; i++) {
993 ret = hclge_vport_q_to_qs_map(hdev, vport);
1003 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
1005 u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
1006 struct hclge_shaper_ir_para ir_para;
1007 u32 shaper_para_c, shaper_para_p;
1011 for (i = 0; i < hdev->tc_max; i++) {
1012 u32 rate = hdev->tm_info.tc_info[i].bw_limit;
1015 ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
1016 &ir_para, max_tm_rate);
1020 shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
1021 HCLGE_SHAPER_BS_U_DEF,
1022 HCLGE_SHAPER_BS_S_DEF);
1023 shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
1026 HCLGE_SHAPER_BS_U_DEF,
1027 HCLGE_SHAPER_BS_S_DEF);
1033 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
1034 shaper_para_c, rate);
1038 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
1039 shaper_para_p, rate);
1047 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
1049 struct hclge_dev *hdev = vport->back;
1050 struct hclge_shaper_ir_para ir_para;
1054 ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
1056 hdev->ae_dev->dev_specs.max_tm_rate);
1060 shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
1061 HCLGE_SHAPER_BS_U_DEF,
1062 HCLGE_SHAPER_BS_S_DEF);
1063 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
1064 vport->vport_id, shaper_para,
1069 shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
1071 HCLGE_SHAPER_BS_U_DEF,
1072 HCLGE_SHAPER_BS_S_DEF);
1073 ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
1074 vport->vport_id, shaper_para,
1082 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
1084 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1085 struct hclge_dev *hdev = vport->back;
1086 u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
1087 struct hclge_shaper_ir_para ir_para;
1091 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1092 ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
1093 HCLGE_SHAPER_LVL_QSET,
1094 &ir_para, max_tm_rate);
1102 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
1104 struct hclge_vport *vport = hdev->vport;
1108 /* Need to configure the vport shaper */
1109 for (i = 0; i < hdev->num_alloc_vport; i++) {
1110 ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
1114 ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
1124 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
1128 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1129 ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
1133 ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
1141 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
1143 struct hclge_vport *vport = hdev->vport;
1144 struct hclge_pg_info *pg_info;
1149 for (i = 0; i < hdev->tc_max; i++) {
1151 &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1152 dwrr = pg_info->tc_dwrr[i];
1154 ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
1158 for (k = 0; k < hdev->num_alloc_vport; k++) {
1159 struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
1161 if (i >= kinfo->tc_info.max_tc)
1164 dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
1165 ret = hclge_tm_qs_weight_cfg(
1166 hdev, vport[k].qs_offset + i,
1176 static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
1178 #define DEFAULT_TC_OFFSET 14
1180 struct hclge_ets_tc_weight_cmd *ets_weight;
1181 struct hclge_desc desc;
1184 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
1185 ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
1187 for (i = 0; i < HNAE3_MAX_TC; i++) {
1188 struct hclge_pg_info *pg_info;
1190 pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1191 ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
1194 ets_weight->weight_offset = DEFAULT_TC_OFFSET;
1196 return hclge_cmd_send(&hdev->hw, &desc, 1);
1199 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
1201 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1202 struct hclge_dev *hdev = vport->back;
1207 ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
1212 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1213 ret = hclge_tm_qs_weight_cfg(
1214 hdev, vport->qs_offset + i,
1215 hdev->tm_info.pg_info[0].tc_dwrr[i]);
1223 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1225 struct hclge_vport *vport = hdev->vport;
1229 for (i = 0; i < hdev->num_alloc_vport; i++) {
1230 ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1240 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1244 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1245 ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1249 if (!hnae3_dev_dcb_supported(hdev))
1252 ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1253 if (ret == -EOPNOTSUPP) {
1254 dev_warn(&hdev->pdev->dev,
1255 "fw %08x doesn't support ets tc weight cmd\n",
1262 ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1270 static int hclge_tm_map_cfg(struct hclge_dev *hdev)
1274 ret = hclge_up_to_tc_map(hdev);
1278 ret = hclge_tm_pg_to_pri_map(hdev);
1282 return hclge_tm_pri_q_qs_cfg(hdev);
1285 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
1289 ret = hclge_tm_port_shaper_cfg(hdev);
1293 ret = hclge_tm_pg_shaper_cfg(hdev);
1297 return hclge_tm_pri_shaper_cfg(hdev);
1300 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
1304 ret = hclge_tm_pg_dwrr_cfg(hdev);
1308 return hclge_tm_pri_dwrr_cfg(hdev);
1311 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1316 /* Only configured in TC-based scheduler mode */
1317 if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1320 for (i = 0; i < hdev->tm_info.num_pg; i++) {
1321 ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1329 static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
1331 struct hclge_vport *vport = hdev->vport;
1336 ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
1340 for (i = 0; i < hdev->num_alloc_vport; i++) {
1341 struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
1343 if (pri_id >= kinfo->tc_info.max_tc)
1346 mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
1348 ret = hclge_tm_qs_schd_mode_cfg(hdev,
1349 vport[i].qs_offset + pri_id,
1358 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1360 struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1361 struct hclge_dev *hdev = vport->back;
1365 if (vport->vport_id >= HNAE3_MAX_TC)
1368 ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1372 for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1373 u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1375 ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1384 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1386 struct hclge_vport *vport = hdev->vport;
1390 if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1391 for (i = 0; i < hdev->tc_max; i++) {
1392 ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
1397 for (i = 0; i < hdev->num_alloc_vport; i++) {
1398 ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1409 static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
1413 ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
1417 return hclge_tm_lvl34_schd_mode_cfg(hdev);
1420 int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1424 /* Cfg tm mapping */
1425 ret = hclge_tm_map_cfg(hdev);
1430 ret = hclge_tm_shaper_cfg(hdev);
1435 ret = hclge_tm_dwrr_cfg(hdev);
1439 /* Cfg schd mode for each level schd */
1440 return hclge_tm_schd_mode_hw(hdev);
1443 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1445 struct hclge_mac *mac = &hdev->hw.mac;
1447 return hclge_pause_param_cfg(hdev, mac->mac_addr,
1448 HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1449 HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1452 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1454 u8 enable_bitmap = 0;
1456 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1457 enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1458 HCLGE_RX_MAC_PAUSE_EN_MSK;
1460 return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1461 hdev->tm_info.pfc_en);
1464 /* The queues used for backpressure are divided into several groups;
1465 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
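* e.g. (illustrative, with the default 5-bit sub-group field): qs_id 37
* gives group = 37 >> 5 = 1 and sub-group bit = 37 & 0x1f = 5, so bit 5
* of group 1's bitmap is set.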
1467 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1469 u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
1470 u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
1471 u8 grp_num = HCLGE_BP_GRP_NUM;
1474 if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
1475 grp_num = HCLGE_BP_EXT_GRP_NUM;
1476 grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
1477 grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
1480 for (i = 0; i < grp_num; i++) {
1484 for (k = 0; k < hdev->num_alloc_vport; k++) {
1485 struct hclge_vport *vport = &hdev->vport[k];
1486 u16 qs_id = vport->qs_offset + tc;
1489 grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
1490 sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1491 HCLGE_BP_SUB_GRP_ID_S);
1493 qs_bitmap |= (1 << sub_grp);
1496 ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1504 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1508 switch (hdev->tm_info.fc_mode) {
1513 case HCLGE_FC_RX_PAUSE:
1517 case HCLGE_FC_TX_PAUSE:
1534 return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1537 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1542 for (i = 0; i < hdev->tm_info.num_tc; i++) {
1543 ret = hclge_bp_setup_hw(hdev, i);
1551 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1555 ret = hclge_pause_param_setup_hw(hdev);
1559 ret = hclge_mac_pause_setup_hw(hdev);
1563 /* Only DCB-supported dev supports qset back pressure and pfc cmd */
1564 if (!hnae3_dev_dcb_supported(hdev))
1567 /* GE MAC does not support PFC; when the driver is initializing and
1568 * the MAC is in GE mode, ignore the error here, otherwise initialization
1569 * will fail.
1571 ret = hclge_pfc_setup_hw(hdev);
1572 if (init && ret == -EOPNOTSUPP)
1573 dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1575 dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1580 return hclge_tm_bp_setup(hdev);
1583 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1585 struct hclge_vport *vport = hdev->vport;
1586 struct hnae3_knic_private_info *kinfo;
1589 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1590 hdev->tm_info.prio_tc[i] = prio_tc[i];
1592 for (k = 0; k < hdev->num_alloc_vport; k++) {
1593 kinfo = &vport[k].nic.kinfo;
1594 kinfo->tc_info.prio_tc[i] = prio_tc[i];
1599 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1604 hdev->tm_info.num_tc = num_tc;
1606 for (i = 0; i < hdev->tm_info.num_tc; i++)
1611 hdev->tm_info.num_tc = 1;
1614 hdev->hw_tc_map = bit_map;
1616 hclge_tm_schd_info_init(hdev);
1619 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1623 if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1624 (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1627 ret = hclge_tm_schd_setup_hw(hdev);
1631 ret = hclge_pause_setup_hw(hdev, init);
1638 int hclge_tm_schd_init(struct hclge_dev *hdev)
1640 /* fc_mode is HCLGE_FC_FULL on reset */
1641 hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1642 hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1644 if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
1645 hdev->tm_info.num_pg != 1)
1648 hclge_tm_schd_info_init(hdev);
1650 return hclge_tm_init_hw(hdev, true);
1653 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1655 struct hclge_vport *vport = hdev->vport;
1658 hclge_tm_vport_tc_info_update(vport);
1660 ret = hclge_vport_q_to_qs_map(hdev, vport);
1664 if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
1667 return hclge_tm_bp_setup(hdev);
1670 int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
1672 struct hclge_tm_nodes_cmd *nodes;
1673 struct hclge_desc desc;
1676 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
1677 /* Each PF has 8 qsets and each VF has 1 qset */
1678 *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
1682 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
1683 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1685 dev_err(&hdev->pdev->dev,
1686 "failed to get qset num, ret = %d\n", ret);
1690 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
1691 *qset_num = le16_to_cpu(nodes->qset_num);
1695 int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
1697 struct hclge_tm_nodes_cmd *nodes;
1698 struct hclge_desc desc;
1701 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
1702 *pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
1706 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
1707 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1709 dev_err(&hdev->pdev->dev,
1710 "failed to get pri num, ret = %d\n", ret);
1714 nodes = (struct hclge_tm_nodes_cmd *)desc.data;
1715 *pri_num = nodes->pri_num;
1719 int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
1722 struct hclge_qs_to_pri_link_cmd *map;
1723 struct hclge_desc desc;
1726 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
1727 map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
1728 map->qs_id = cpu_to_le16(qset_id);
1729 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1731 dev_err(&hdev->pdev->dev,
1732 "failed to get qset map priority, ret = %d\n", ret);
1736 *priority = map->priority;
1737 *link_vld = map->link_vld;
1741 int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
1743 struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
1744 struct hclge_desc desc;
1747 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
1748 qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
1749 qs_sch_mode->qs_id = cpu_to_le16(qset_id);
1750 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1752 dev_err(&hdev->pdev->dev,
1753 "failed to get qset sch mode, ret = %d\n", ret);
1757 *mode = qs_sch_mode->sch_mode;
1761 int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
1763 struct hclge_qs_weight_cmd *qs_weight;
1764 struct hclge_desc desc;
1767 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
1768 qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
1769 qs_weight->qs_id = cpu_to_le16(qset_id);
1770 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1772 dev_err(&hdev->pdev->dev,
1773 "failed to get qset weight, ret = %d\n", ret);
1777 *weight = qs_weight->dwrr;
1781 int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
1782 struct hclge_tm_shaper_para *para)
1784 struct hclge_qs_shapping_cmd *shap_cfg_cmd;
1785 struct hclge_desc desc;
1789 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
1790 shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
1791 shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
1792 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1794 dev_err(&hdev->pdev->dev,
1795 "failed to get qset %u shaper, ret = %d\n", qset_id,
1800 shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
1801 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1802 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1803 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1804 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1805 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1806 para->flag = shap_cfg_cmd->flag;
1807 para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
1811 int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
1813 struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
1814 struct hclge_desc desc;
1817 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
1818 pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
1819 pri_sch_mode->pri_id = pri_id;
1820 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1822 dev_err(&hdev->pdev->dev,
1823 "failed to get priority sch mode, ret = %d\n", ret);
1827 *mode = pri_sch_mode->sch_mode;
1831 int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
1833 struct hclge_priority_weight_cmd *priority_weight;
1834 struct hclge_desc desc;
1837 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
1838 priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
1839 priority_weight->pri_id = pri_id;
1840 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1842 dev_err(&hdev->pdev->dev,
1843 "failed to get priority weight, ret = %d\n", ret);
1847 *weight = priority_weight->dwrr;
1851 int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
1852 enum hclge_opcode_type cmd,
1853 struct hclge_tm_shaper_para *para)
1855 struct hclge_pri_shapping_cmd *shap_cfg_cmd;
1856 struct hclge_desc desc;
1860 if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
1861 cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
1864 hclge_cmd_setup_basic_desc(&desc, cmd, true);
1865 shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
1866 shap_cfg_cmd->pri_id = pri_id;
1867 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1869 dev_err(&hdev->pdev->dev,
1870 "failed to get priority shaper(%#x), ret = %d\n",
1875 shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
1876 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1877 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1878 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1879 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1880 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1881 para->flag = shap_cfg_cmd->flag;
1882 para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
1886 int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
1888 struct hclge_nq_to_qs_link_cmd *map;
1889 struct hclge_desc desc;
1894 map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
1895 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
1896 map->nq_id = cpu_to_le16(q_id);
1897 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1899 dev_err(&hdev->pdev->dev,
1900 "failed to get queue to qset map, ret = %d\n", ret);
1903 *qset_id = le16_to_cpu(map->qset_id);
1905 /* convert qset_id to the following format, drop the vld bit
1906 *          | qs_id_h | vld | qs_id_l |
1907 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
1908 *           \         \   /         /
1909 *            \         \ /         /
1910 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
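* e.g. (illustrative) a register value of 0xC01 (qs_id_h = 0x1, vld = 1,
* qs_id_l = 0x001) converts back to qset_id (0x1 << 10) | 0x001 = 0x401,
* i.e. 1025.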
1912 qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
1913 HCLGE_TM_QS_ID_L_S);
1914 qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
1915 HCLGE_TM_QS_ID_H_EXT_S);
1917 hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
1919 hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
1924 int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
1926 #define HCLGE_TM_TC_MASK 0x7
1928 struct hclge_tqp_tx_queue_tc_cmd *tc;
1929 struct hclge_desc desc;
1932 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
1933 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
1934 tc->queue_id = cpu_to_le16(q_id);
1935 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1937 dev_err(&hdev->pdev->dev,
1938 "failed to get queue to tc map, ret = %d\n", ret);
1942 *tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
1946 int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
1949 struct hclge_pg_to_pri_link_cmd *map;
1950 struct hclge_desc desc;
1953 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
1954 map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
1956 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1958 dev_err(&hdev->pdev->dev,
1959 "failed to get pg to pri map, ret = %d\n", ret);
1963 *pri_bit_map = map->pri_bit_map;
1967 int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
1969 struct hclge_pg_weight_cmd *pg_weight_cmd;
1970 struct hclge_desc desc;
1973 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
1974 pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
1975 pg_weight_cmd->pg_id = pg_id;
1976 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1978 dev_err(&hdev->pdev->dev,
1979 "failed to get pg weight, ret = %d\n", ret);
1983 *weight = pg_weight_cmd->dwrr;
1987 int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
1989 struct hclge_desc desc;
1992 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
1993 desc.data[0] = cpu_to_le32(pg_id);
1994 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1996 dev_err(&hdev->pdev->dev,
1997 "failed to get pg sch mode, ret = %d\n", ret);
2001 *mode = (u8)le32_to_cpu(desc.data[1]);
2005 int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
2006 enum hclge_opcode_type cmd,
2007 struct hclge_tm_shaper_para *para)
2009 struct hclge_pg_shapping_cmd *shap_cfg_cmd;
2010 struct hclge_desc desc;
2014 if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
2015 cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
2018 hclge_cmd_setup_basic_desc(&desc, cmd, true);
2019 shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
2020 shap_cfg_cmd->pg_id = pg_id;
2021 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2023 dev_err(&hdev->pdev->dev,
2024 "failed to get pg shaper(%#x), ret = %d\n",
2029 shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
2030 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
2031 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
2032 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
2033 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
2034 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
2035 para->flag = shap_cfg_cmd->flag;
2036 para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
2040 int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
2041 struct hclge_tm_shaper_para *para)
2043 struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
2044 struct hclge_desc desc;
2048 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
2049 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2051 dev_err(&hdev->pdev->dev,
2052 "failed to get port shaper, ret = %d\n", ret);
2056 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
2057 shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
2058 para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
2059 para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
2060 para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
2061 para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
2062 para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
2063 para->flag = port_shap_cfg_cmd->flag;
2064 para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);