// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: Rate to be configured, its unit is Mbps
 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculate successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > HCLGE_ETHER_MAX_RATE)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}

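/* Worked example (illustrative, not from the original source): for a
 * port-level shaper, tick = 6 * 8 = 48. With ir = 10500 Mbps the first
 * pass gives ir_calc = 126 * 8000 / 48 = 21000, which is too fast, so
 * the denominator branch raises ir_s to 1 and ir_calc becomes
 * 1008000 / (48 * 2) = 10500 == ir. The result is ir_b = 126, ir_u = 0,
 * ir_s = 1; checking against the formula above:
 * 126 * (2 ^ 0) * 8 / (48 * (2 ^ 1)) * 1000 = 10500 Mbps.
 */
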
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * the register for priority has four bytes, the first byte includes
	 * priority0 and priority1, the higher 4bit stands for priority1
	 * while the lower 4bit stands for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

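/* Worked example (illustrative, not from the original source): with
 * pri_id = 5 and prio_tc[5] = 3, the byte index is 5 >> 1 = 2 and the
 * shift is (5 & 1) * 4 = 4, so the statement above performs
 * pri[2] |= 3 << 4: TC 3 lands in the high nibble of the third byte,
 * the pri_5 slot in the layout drawn above.
 */
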
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

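/* Illustrative note (not from the original source): assuming the
 * IR_B/IR_U/IR_S/BS_B/BS_S field layout defined in hclge_tm.h, with
 * IR_B in the low byte and the remaining fields packed above it, a
 * result such as ir_b = 126, ir_u = 0, ir_s = 1 together with the
 * default bucket sizes bs_b = 5 and bs_s = 20 is folded into the
 * single u32 register word that the shaper config commands below carry.
 */
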
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	kinfo->num_tc = vport->vport_id ? 1 :
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
				(vport->vport_id ? (vport->vport_id - 1) : 0);

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, max_rss_size);
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

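/* Illustrative note (not from the original source): with num_tc = 4,
 * the PF (vport_id 0) uses qset offsets 0..3, one per TC, while VF n
 * (vport_id n, n >= 1) is limited to a single TC and gets the single
 * qset num_tc + (n - 1) = 4 + (n - 1), which is what the qs_offset
 * arithmetic above computes.
 */
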
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but the last fc_mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return -EINVAL;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

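/* Illustrative note (not from the original source): under TC-based
 * scheduling, qset (vport[k].qs_offset + i) is linked to priority i, so
 * every vport's traffic for TC i competes at the same priority; under
 * vnet-based scheduling all HNAE3_MAX_TC qsets of vport k are linked to
 * priority k, giving each vport its own priority regardless of TC.
 */
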
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only being configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* Each TC has 1024 queue sets to back-pressure; they divide into
 * 32 groups, each group containing 32 queue sets, which can be
 * represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	int ret;
	u8 i;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		u32 qs_bitmap = 0;
		int k;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

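/* Illustrative note (not from the original source): assuming the group
 * field is qs_id / 32 and the sub-group field is qs_id % 32, qset 37
 * belongs to group 1, bit 5, so for this TC the loop sets BIT(5) in
 * group 1's bitmap and leaves that qset clear in the other 31 groups.
 */
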
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC; when the driver is initializing and
	 * the MAC is in GE mode, ignore the error here, otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	hclge_pfc_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}