// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, its unit is Mbps
 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
 * @ir_para: parameters of IR shaper
 * @max_tm_rate: max tm rate available to config
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}
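/* Worked example (illustrative, not from the original source): at port
 * level tick = 6 * 8 = 48, so the first pass above gives
 * ir_calc = 126 * 8000 / 48 = 21000 Mbps. Requesting ir = 10000 Mbps
 * then takes the ir_calc > ir branch: the loop stops at ir_s_calc = 2,
 * and ir_b = (10000 * 48 * 4 + 4000) / 8000 = 240, so
 * IR = 240 * 8 / (48 * 2^2) * 1000 = 10000 Mbps exactly.
 */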
static const u16 hclge_pfc_tx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
};

static const u16 hclge_pfc_rx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
};
static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
{
	const u16 *offset;
	int i;

	if (tx)
		offset = hclge_pfc_tx_stats_offset;
	else
		offset = hclge_pfc_rx_stats_offset;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}

void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, false, stats);
}

void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, true, stats);
}
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
			   u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * the register for priority has four bytes, the first byte includes
	 *  priority0 and priority1, the higher 4bit stands for priority1
	 *  while the lower 4bit stands for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
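/* Worked example (illustrative): if pri_id 3 maps to tc 2, the statement
 * above performs pri[1] |= 2 << 4, i.e. tc 2 lands in the high nibble of
 * the second byte, matching the per-priority layout documented above.
 */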
int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
{
	u8 i;

	hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
	hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
	for (i = 0; i < HNAE3_MAX_DSCP; i++)
		hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
}
int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
	u8 *req0 = (u8 *)desc[0].data;
	u8 *req1 = (u8 *)desc[1].data;
	u8 pri_id, tc_id, i, j;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);

	/* The low 32 dscp settings use bd0, the high 32 settings use bd1 */
	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		/* Each dscp setting has 4 bits, so each byte saves two dscp
		 * settings
		 */
		req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);

		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
	}

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
}
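/* Worked example (illustrative, assuming HCLGE_DSCP_TC_SHIFT(n) expands
 * to ((n) & 1) * 4): a dscp of 5 whose priority maps to tc 3 sets
 * req0[2] |= 3 << 4, so each byte carries two 4-bit tc values, with
 * dscp 0-31 in bd0 and dscp 32-63 in bd1.
 */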
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
				      bool link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
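/* Worked example (illustrative): qs_id 0x0406 has qs_id_h = 1 (bits
 * 14 ~ 10) and qs_id_l = 6 (bits 9 ~ 0); repacking moves qs_id_h up to
 * bits 15 ~ 11, and OR-ing the vld bit (bit 10) produces 0x0c06 in the
 * descriptor.
 */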
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}
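/* Usage note: callers below combine the calculated ir_b/ir_u/ir_s with
 * the default bucket sizes, e.g.
 * hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u, ir_para.ir_s,
 * HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF), and write the packed
 * word into the corresponding shaper command descriptor.
 */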
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}
static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}
static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.max_tc = 1;
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.max_tc = hdev->tc_max;
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;  /* 100 percent as init */
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	if (vport->vport_id == PF_VPORT_ID)
		hdev->rss_cfg.rss_size = kinfo->rss_size;

	/* when mqprio is enabled, tc_info has already been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i, tc_sch_mode;
	u32 bw_limit;

	for (i = 0; i < hdev->tc_max; i++) {
		if (i < hdev->tm_info.num_tc) {
			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
			bw_limit = hdev->tm_info.pg_info[0].bw_limit;
		} else {
			tc_sch_mode = HCLGE_SCH_MODE_SP;
			bw_limit = 0;
		}

		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit = bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
}
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
		for (; k < HNAE3_MAX_TC; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
	}
}
static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}
static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}
void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}
static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, one by one mapping */
	for (k = 0; k < hdev->num_alloc_vport; k++) {
		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
			bool link_vld = i < kinfo->tc_info.num_tc;

			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 pri, link_vld);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
	for (k = 0; k < hdev->num_alloc_vport; k++)
		for (i = 0; i < HNAE3_MAX_TC; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 k, true);
			if (ret)
				return ret;
		}

	return 0;
}
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
	else
		return -EINVAL;

	if (ret)
		return ret;

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para_c, shaper_para_p;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tc_max; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		if (rate) {
			ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
						     &ir_para, max_tm_rate);
			if (ret)
				return ret;

			shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
			shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
								   ir_para.ir_u,
								   ir_para.ir_s,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
		} else {
			shaper_para_c = 0;
			shaper_para_p = 0;
		}

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para_c, rate);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para_p, rate);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tc_max; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

			if (i >= kinfo->tc_info.max_tc)
				continue;

			dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
		ret = hclge_dscp_to_tc_map(hdev);
		if (ret)
			return ret;
	}

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}
static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 mode;
	u16 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;

		if (pri_id >= kinfo->tc_info.max_tc)
			continue;

		mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
		       HCLGE_SCH_MODE_SP;
		ret = hclge_tm_qs_schd_mode_cfg(hdev,
						vport[i].qs_offset + pri_id,
						mode);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tc_max; i++) {
			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}
static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}
/* The queue sets used for backpressure are divided into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	u16 i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
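/* Worked example (illustrative, assuming a 5-bit sub-group field):
 * qs_id 37 = 0b100101 splits into grp = 1 and sub_grp = 5, so back
 * pressure for that qset is reported as bit 5 of group 1's qs_bitmap.
 */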
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC, when driver is initializing and MAC
	 * is in GE Mode, ignore the error here, otherwise initialization
	 * will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);
	hclge_dscp_to_prio_map_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}
int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
		return 0;

	return hclge_tm_bp_setup(hdev);
}
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}
int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}
int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}
int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}
int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}
int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}
int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
	 *             \         \   /         /
	 *              \         \ /         /
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);

	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}
1974 #define HCLGE_TM_TC_MASK 0x7
1976 struct hclge_tqp_tx_queue_tc_cmd *tc;
1977 struct hclge_desc desc;
1980 tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
1981 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
1982 tc->queue_id = cpu_to_le16(q_id);
1983 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1985 dev_err(&hdev->pdev->dev,
1986 "failed to get queue to tc map, ret = %d\n", ret);
1990 *tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}
int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}
int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}
int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}