// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: Rate to be config, its unit is Mbps
 * @shaper_level: the shaper level. eg: port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculate successfully, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];
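
	/* Worked example, derived from the formula above (note that
	 * 126 * 8 * 1000 = 1008000): at port level tick = 6 * 8 = 48, so
	 * for ir = 100000 Mbps the first estimate is 1008000 / 48 = 21000.
	 * The numerator is then doubled until 1008000 * 2^3 / 48 = 168000
	 * exceeds ir; stepping ir_u back to 2 gives
	 * ir_b = (100000 * 48 + 16000) / 32000 = 150, and indeed
	 * 150 * 2^2 * 8 * 1000 / 48 = 100000 Mbps exactly.
	 */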

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}

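/* PFC packet counters are read with a chain of three command descriptors;
 * every descriptor but the last carries HCLGE_CMD_FLAG_NEXT. Each one
 * returns HCLGE_TM_PFC_NUM_GET_PER_CMD per-TC counters, so the chain
 * covers up to HCLGE_MAX_TC_NUM traffic classes.
 */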
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			/* stride across descriptors is the number of
			 * counters each one carries
			 */
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}

	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * the register for priority has four bytes, the first byte includes
	 * priority0 and priority1, the higher 4 bits stand for priority1
	 * while the lower 4 bits stand for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

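/* Program the 8 user priority -> TC nibble map with a single command;
 * e.g. with prio_tc[0] = 0 and prio_tc[1] = 1, the first byte of
 * desc.data becomes 0x10 per the layout described above.
 */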
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

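/* Recompute a vport's TC and RSS layout: rss_size follows the user's
 * requested size when one is set and fits, otherwise it is clamped to
 * the largest size the allocated TQPs and rss_size_max allow.
 */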
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, max_rss_size);
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
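
	/* e.g. with num_tc = 4, priorities 0..3 map to TC 0..3 while
	 * priorities 4..7 fall back to TC 0.
	 */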

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

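/* Establish the qset -> priority mapping for every vport: in TC-based
 * mode, qset i of each vport is linked to priority i (one per TC); in
 * vnet-based mode, all qsets of vport k are linked to priority k.
 */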
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
				hdev, HCLGE_TM_SHAP_C_BUCKET, i,
				0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
				HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
				hdev, HCLGE_TM_SHAP_P_BUCKET, i,
				ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
				HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

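/* Program the per-TC ETS weights in one command; TCs absent from
 * hw_tc_map keep the non-zero DEFAULT_TC_WEIGHT rather than 0.
 */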
static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* Each TC has 1024 queue sets to back-press; they are divided into
 * 32 groups, each group containing 32 queue sets, which can be
 * represented by a u32 bitmap.
 */
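/* e.g. qs_id 70 (0b00010_00110) asserts bit 6 of the bitmap written for
 * group 2; this assumes GRP_ID is qs_id bits 9:5 and SUB_GRP_ID is bits
 * 4:0, matching the 32 x 32 layout described above.
 */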
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	int i;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

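/* Map the global flow-control mode onto the MAC pause enables. Note that
 * HCLGE_FC_PFC turns plain MAC pause off in both directions: priority
 * flow control and global pause are mutually exclusive here.
 */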
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret = 0;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return ret;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported devices support qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC; when the driver is initializing and
	 * the MAC is in GE mode, ignore the error here, otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret)
		return ret;

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -EINVAL;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}