net: hns3: fix PFC not setting problem for DCB module
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PRI    = 0,
        HCLGE_SHAPER_LVL_PG     = 1,
        HCLGE_SHAPER_LVL_PORT   = 2,
        HCLGE_SHAPER_LVL_QSET   = 3,
        HCLGE_SHAPER_LVL_CNT    = 4,
        HCLGE_SHAPER_LVL_VF     = 0,
        HCLGE_SHAPER_LVL_PF     = 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM    3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD    3

#define HCLGE_SHAPER_BS_U_DEF   5
#define HCLGE_SHAPER_BS_S_DEF   20

#define HCLGE_ETHER_MAX_RATE    100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
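/* For example (illustrative, not part of the original source): at the port
 * level Tick = 6 * 8 = 48, so with ir_b = 126, ir_u = 0 and ir_s = 0 the
 * formula gives IR = 126 * 1 * 8 / 48 * 1000 = 21000 Mbps, which matches
 * the ir_calc starting point computed below.
 */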
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
                                  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
        const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
                6 * 256,        /* Priority level */
                6 * 32,         /* Priority group level */
                6 * 8,          /* Port level */
                6 * 256         /* Qset level */
        };
        u8 ir_u_calc = 0, ir_s_calc = 0;
        u32 ir_calc;
        u32 tick;

        /* Calc tick */
        if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
                return -EINVAL;

        tick = tick_array[shaper_level];

        /* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
         * the formula is changed to:
         *              126 * 1 * 8
         * ir_calc = ---------------- * 1000
         *              tick * 1
         */
        ir_calc = (1008000 + (tick >> 1) - 1) / tick;

        if (ir_calc == ir) {
                *ir_b = 126;
                *ir_u = 0;
                *ir_s = 0;

                return 0;
        } else if (ir_calc > ir) {
                /* Increasing the denominator to select ir_s value */
                while (ir_calc > ir) {
                        ir_s_calc++;
                        ir_calc = 1008000 / (tick * (1 << ir_s_calc));
                }

                if (ir_calc == ir)
                        *ir_b = 126;
                else
                        *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
        } else {
                /* Increasing the numerator to select ir_u value */
                u32 numerator;

                while (ir_calc < ir) {
                        ir_u_calc++;
                        numerator = 1008000 * (1 << ir_u_calc);
                        ir_calc = (numerator + (tick >> 1)) / tick;
                }

                if (ir_calc == ir) {
                        *ir_b = 126;
                } else {
                        u32 denominator = (8000 * (1 << --ir_u_calc));
                        *ir_b = (ir * tick + (denominator >> 1)) / denominator;
                }
        }

        *ir_u = ir_u_calc;
        *ir_s = ir_s_calc;

        return 0;
}
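
/* Worked example (illustrative, not part of the original source): for
 * ir = HCLGE_ETHER_MAX_RATE (100000 Mbps) at HCLGE_SHAPER_LVL_PORT,
 * tick = 48 and the initial ir_calc is 21000, so the ir_u branch runs
 * until ir_u_calc = 3 (ir_calc = 168000 >= ir); ir_u_calc is then
 * decremented to 2 and ir_b = (100000 * 48 + 16000) / 32000 = 150.
 * Check: 150 * 2^2 * 8 / (48 * 2^0) * 1000 = 100000 Mbps.
 */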

static int hclge_pfc_stats_get(struct hclge_dev *hdev,
                               enum hclge_opcode_type opcode, u64 *stats)
{
        struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
        int ret, i, j;

        if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
              opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
                return -EINVAL;

        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
                hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
                if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
                        desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        }

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
        if (ret)
                return ret;

        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
                struct hclge_pfc_stats_cmd *pfc_stats =
                                (struct hclge_pfc_stats_cmd *)desc[i].data;

                for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
                        u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;

                        if (index < HCLGE_MAX_TC_NUM)
                                stats[index] =
                                        le64_to_cpu(pfc_stats->pkt_num[j]);
                }
        }
        return 0;
}
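
/* Layout note (illustrative): each of the three descriptors above carries
 * HCLGE_TM_PFC_NUM_GET_PER_CMD counters, so 3 * 3 = 9 slots cover the
 * up-to-HCLGE_MAX_TC_NUM TCs (8 for this hardware) and the bounds check
 * discards the unused slot. The stride uses HCLGE_TM_PFC_PKT_GET_CMD_NUM,
 * which is equivalent only because both constants are 3.
 */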

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

        desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
                (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
                                  u8 pfc_bitmap)
{
        struct hclge_desc desc;
        struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

        pfc->tx_rx_en_bitmap = tx_rx_bitmap;
        pfc->pri_en_bitmap = pfc_bitmap;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
                                 u8 pause_trans_gap, u16 pause_trans_time)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

        ether_addr_copy(pause_param->mac_addr, addr);
        ether_addr_copy(pause_param->mac_addr_extra, addr);
        pause_param->pause_trans_gap = pause_trans_gap;
        pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;
        u16 trans_time;
        u8 trans_gap;
        int ret;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        trans_gap = pause_param->pause_trans_gap;
        trans_time = le16_to_cpu(pause_param->pause_trans_time);

        return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
        u8 tc;

        tc = hdev->tm_info.prio_tc[pri_id];

        if (tc >= hdev->tm_info.num_tc)
                return -EINVAL;

        /* the register for priority has four bytes: the first byte holds
         * priority 0 and priority 1, with the higher 4 bits standing for
         * priority 1 and the lower 4 bits for priority 0, as below:
         * first byte:  | pri_1 | pri_0 |
         * second byte: | pri_3 | pri_2 |
         * third byte:  | pri_5 | pri_4 |
         * fourth byte: | pri_7 | pri_6 |
         */
        pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

        return 0;
}
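
/* For example (illustrative): pri_id = 5 with tc = 2 evaluates
 * pri[5 >> 1] |= 2 << ((5 & 1) * 4), i.e. it sets the high nibble of
 * pri[2] (the third byte) to 2 while leaving the low nibble, which holds
 * priority 4, untouched.
 */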

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
        struct hclge_desc desc;
        u8 *pri = (u8 *)desc.data;
        u8 pri_id;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

        for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
        }

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u8 pg_id, u8 pri_bit_map)
{
        struct hclge_pg_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

        map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

        map->pg_id = pg_id;
        map->pri_bit_map = pri_bit_map;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u16 qs_id, u8 pri)
{
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

        map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
        map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
                                    u16 q_id, u16 qs_id)
{
        struct hclge_nq_to_qs_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

        map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

        map->nq_id = cpu_to_le16(q_id);
        map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
                                  u8 dwrr)
{
        struct hclge_pg_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

        weight = (struct hclge_pg_weight_cmd *)desc.data;

        weight->pg_id = pg_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
                                   u8 dwrr)
{
        struct hclge_priority_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

        weight = (struct hclge_priority_weight_cmd *)desc.data;

        weight->pri_id = pri_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
                                  u8 dwrr)
{
        struct hclge_qs_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

        weight = (struct hclge_qs_weight_cmd *)desc.data;

        weight->qs_id = cpu_to_le16(qs_id);
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
                                    enum hclge_shap_bucket bucket, u8 pg_id,
                                    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
        struct hclge_pg_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;
        u32 shapping_para = 0;

        opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
                HCLGE_OPC_TM_PG_C_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

        shap_cfg_cmd->pg_id = pg_id;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shapping_para, BS_S, bs_s);

        shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
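
/* Packing note (illustrative): hclge_tm_set_field() ORs each parameter into
 * its bit field of the single u32 shapping_para word sent to firmware, so
 * e.g. ir_b = 150, ir_u = 2, ir_s = 0 plus the default bucket sizes
 * (HCLGE_SHAPER_BS_U_DEF, HCLGE_SHAPER_BS_S_DEF) all travel in one word;
 * the exact field offsets are defined in hclge_tm.h.
 */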

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_port_shapping_cmd *shap_cfg_cmd;
        struct hclge_desc desc;
        u32 shapping_para = 0;
        u8 ir_u, ir_b, ir_s;
        int ret;

        ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
                                     HCLGE_SHAPER_LVL_PORT,
                                     &ir_b, &ir_u, &ir_s);
        if (ret)
                return ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
        shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
        hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

        shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
                                     enum hclge_shap_bucket bucket, u8 pri_id,
                                     u8 ir_b, u8 ir_u, u8 ir_s,
                                     u8 bs_b, u8 bs_s)
{
        struct hclge_pri_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;
        u32 shapping_para = 0;

        opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
                HCLGE_OPC_TM_PRI_C_SHAPPING;

        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

        shap_cfg_cmd->pri_id = pri_id;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shapping_para, BS_S, bs_s);

        shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

        if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pg_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

        if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pri_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

        if (mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(qs_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
                              u32 bit_map)
{
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
                                   false);

        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

        bp_to_qs_map_cmd->tc_id = tc;
        bp_to_qs_map_cmd->qs_group_id = grp_id;
        bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u16 max_rss_size;
        u8 i;

        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
        kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
        max_rss_size = min_t(u16, hdev->rss_size_max,
                             vport->alloc_tqps / kinfo->num_tc);

        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size) {
                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
                         kinfo->rss_size, kinfo->req_rss_size);
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
                dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
                         kinfo->rss_size, max_rss_size);
                kinfo->rss_size = max_rss_size;
        }

        kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
        vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
        vport->dwrr = 100;  /* 100 percent as init */
        vport->alloc_rss_size = kinfo->rss_size;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i)) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
               FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
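
/* Sizing example (illustrative): with alloc_tqps = 16, num_tc = 4 and
 * rss_size_max = 8, max_rss_size = min(8, 16 / 4) = 4, so each enabled TC
 * gets a block of 4 queue pairs and num_tqps = 4 * 4 = 16.
 */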

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_tm_vport_tc_info_update(vport);

                vport++;
        }
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
                hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
                hdev->tm_info.tc_info[i].pgid = 0;
                hdev->tm_info.tc_info[i].bw_limit =
                        hdev->tm_info.pg_info[0].bw_limit;
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;

        /* DCB is enabled if we have more than 1 TC */
        if (hdev->tm_info.num_tc > 1)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
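
/* For example (illustrative): with num_tc = 4 the priority-to-TC table
 * becomes prio_tc[] = { 0, 1, 2, 3, 0, 0, 0, 0 }: priorities beyond the
 * configured TC count fall back to TC 0.
 */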

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                int k;

                hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

                hdev->tm_info.pg_info[i].pg_id = i;
                hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

                hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

                if (i != 0)
                        continue;

                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
        }

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
                                 "DCB is disabled, but last mode is FC_PFC\n");

                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
                /* fc_mode_last_time records the last fc_mode when
                 * DCB is enabled, so that fc_mode can be set to
                 * the correct value when DCB is disabled.
                 */
                hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
                hdev->tm_info.fc_mode = HCLGE_FC_PFC;
        }
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
        if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
            (hdev->tm_info.num_pg != 1))
                return -EINVAL;

        hclge_tm_pg_info_init(hdev);

        hclge_tm_tc_info_init(hdev);

        hclge_tm_vport_info_update(hdev);

        hclge_pfc_info_init(hdev);

        return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg mapping */
                ret = hclge_tm_pg_to_pri_map_cfg(
                        hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        int ret;
        u32 i;

        /* Cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Pg to pri */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Calc shaper para */
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.pg_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PG,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_C_BUCKET, i,
                                               0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
                                               HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;

                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_P_BUCKET, i,
                                               ir_b, ir_u, ir_s,
                                               HCLGE_SHAPER_BS_U_DEF,
                                               HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        /* cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* pg to prio */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg dwrr */
                ret = hclge_tm_pg_weight_cfg(hdev, i,
                                             hdev->tm_info.pg_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
                                   struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_queue **tqp = kinfo->tqp;
        struct hnae3_tc_info *v_tc_info;
        u32 i, j;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                v_tc_info = &kinfo->tc_info[i];
                for (j = 0; j < v_tc_info->tqp_count; j++) {
                        struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

                        ret = hclge_tm_q_to_qs_map_cfg(hdev,
                                                       hclge_get_queue_id(q),
                                                       vport->qs_offset + i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, one by one mapping */
                for (k = 0; k < hdev->num_alloc_vport; k++)
                        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, i);
                                if (ret)
                                        return ret;
                        }
        } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
                for (k = 0; k < hdev->num_alloc_vport; k++)
                        for (i = 0; i < HNAE3_MAX_TC; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, k);
                                if (ret)
                                        return ret;
                        }
        } else {
                return -EINVAL;
        }

        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
        u8 ir_u, ir_b, ir_s;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_PRI,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_shapping_cfg(
                        hdev, HCLGE_TM_SHAP_C_BUCKET, i,
                        0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
                        HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_shapping_cfg(
                        hdev, HCLGE_TM_SHAP_P_BUCKET, i,
                        ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
                        HCLGE_SHAPER_BS_S_DEF);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        u8 ir_u, ir_b, ir_s;
        int ret;

        ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
                                     &ir_b, &ir_u, &ir_s);
        if (ret)
                return ret;

        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
                                        vport->vport_id,
                                        0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
                                        HCLGE_SHAPER_BS_S_DEF);
        if (ret)
                return ret;

        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
                                        vport->vport_id,
                                        ir_b, ir_u, ir_s,
                                        HCLGE_SHAPER_BS_U_DEF,
                                        HCLGE_SHAPER_BS_S_DEF);
        if (ret)
                return ret;

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u8 ir_u, ir_b, ir_s;
        u32 i;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_shaper_para_calc(
                                        hdev->tm_info.tc_info[i].bw_limit,
                                        HCLGE_SHAPER_LVL_QSET,
                                        &ir_b, &ir_u, &ir_s);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        /* Need config vport shaper */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        struct hclge_pg_info *pg_info;
        u8 dwrr;
        int ret;
        u32 i, k;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                dwrr = pg_info->tc_dwrr[i];

                ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
                if (ret)
                        return ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        ret = hclge_tm_qs_weight_cfg(
                                hdev, vport[k].qs_offset + i,
                                vport[k].dwrr);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT       1
#define DEFAULT_TC_OFFSET       14

        struct hclge_ets_tc_weight_cmd *ets_weight;
        struct hclge_desc desc;
        int i;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
        ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                struct hclge_pg_info *pg_info;

                ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

                if (!(hdev->hw_tc_map & BIT(i)))
                        continue;

                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
        }

        ets_weight->weight_offset = DEFAULT_TC_OFFSET;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        /* Vf dwrr */
        ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
        if (ret)
                return ret;

        /* Qset dwrr */
        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_tm_qs_weight_cfg(
                        hdev, vport->qs_offset + i,
                        hdev->tm_info.pg_info[0].tc_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;

                if (!hnae3_dev_dcb_supported(hdev))
                        return 0;

                ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
                if (ret == -EOPNOTSUPP) {
                        dev_warn(&hdev->pdev->dev,
                                 "fw %08x doesn't support ets tc weight cmd\n",
                                 hdev->fw_version);
                        ret = 0;
                }

                return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_up_to_tc_map(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_to_pri_map(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_port_shaper_cfg(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_shaper_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_dwrr_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
        int ret;
        u8 i;

        /* Only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
        if (ret)
                return ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

                ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
                                                sch_mode);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u8 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                for (i = 0; i < hdev->tm_info.num_tc; i++) {
                        ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
                        if (ret)
                                return ret;

                        for (k = 0; k < hdev->num_alloc_vport; k++) {
                                ret = hclge_tm_qs_schd_mode_cfg(
                                        hdev, vport[k].qs_offset + i,
                                        HCLGE_SCH_MODE_DWRR);
                                if (ret)
                                        return ret;
                        }
                }
        } else {
                for (i = 0; i < hdev->num_alloc_vport; i++) {
                        ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
                        if (ret)
                                return ret;

                        vport++;
                }
        }

        return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
        int ret;

        /* Cfg tm mapping */
        ret = hclge_tm_map_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg tm shaper */
        ret = hclge_tm_shaper_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg dwrr */
        ret = hclge_tm_dwrr_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg schd mode for each level schd */
        return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        return hclge_pause_param_cfg(hdev, mac->mac_addr,
                                     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
                                     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
        u8 enable_bitmap = 0;

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
                enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
                                HCLGE_RX_MAC_PAUSE_EN_MSK;

        return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
                                      hdev->tm_info.pfc_en);
}

/* Each TC supports 1024 queue sets for backpressure. They are divided into
 * 32 groups of 32 queue sets each, so every group can be represented by a
 * u32 bitmap.
 */
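/* For example (illustrative): qs_id = 100 falls into group 100 / 32 = 3 and
 * sub-group 100 % 32 = 4, so bit 4 of the qs_bitmap written for group 3 is
 * set, assuming HCLGE_BP_GRP_ID_M/S select qs_id bits 5-9 and
 * HCLGE_BP_SUB_GRP_ID_M/S select bits 0-4.
 */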
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
        int i;

        for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
                u32 qs_bitmap = 0;
                int k, ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hclge_vport *vport = &hdev->vport[k];
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;

                        grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
                                              HCLGE_BP_GRP_ID_S);
                        sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
                                                  HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
                }

                ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
        bool tx_en, rx_en;

        switch (hdev->tm_info.fc_mode) {
        case HCLGE_FC_NONE:
                tx_en = false;
                rx_en = false;
                break;
        case HCLGE_FC_RX_PAUSE:
                tx_en = false;
                rx_en = true;
                break;
        case HCLGE_FC_TX_PAUSE:
                tx_en = true;
                rx_en = false;
                break;
        case HCLGE_FC_FULL:
                tx_en = true;
                rx_en = true;
                break;
        case HCLGE_FC_PFC:
                tx_en = false;
                rx_en = false;
                break;
        default:
                tx_en = true;
                rx_en = true;
        }

        return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
        int ret = 0;
        int i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_bp_setup_hw(hdev, i);
                if (ret)
                        return ret;
        }

        return ret;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        ret = hclge_pause_param_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_mac_pause_setup_hw(hdev);
        if (ret)
                return ret;

        /* Only DCB-supported dev supports qset back pressure and pfc cmd */
        if (!hnae3_dev_dcb_supported(hdev))
                return 0;

        /* GE MAC does not support PFC, when driver is initializing and MAC
         * is in GE Mode, ignore the error here, otherwise initialization
         * will fail.
         */
        ret = hclge_pfc_setup_hw(hdev);
        if (init && ret == -EOPNOTSUPP)
                dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
        else if (ret)
                return ret;

        return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
        struct hclge_vport *vport = hdev->vport;
        struct hnae3_knic_private_info *kinfo;
        u32 i, k;

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                hdev->tm_info.prio_tc[i] = prio_tc[i];

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        kinfo = &vport[k].nic.kinfo;
                        kinfo->prio_tc[i] = prio_tc[i];
                }
        }
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
        u8 i, bit_map = 0;

        hdev->tm_info.num_tc = num_tc;

        for (i = 0; i < hdev->tm_info.num_tc; i++)
                bit_map |= BIT(i);

        if (!bit_map) {
                bit_map = 1;
                hdev->tm_info.num_tc = 1;
        }

        hdev->hw_tc_map = bit_map;

        hclge_tm_schd_info_init(hdev);
}
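
/* For example (illustrative): num_tc = 3 yields hw_tc_map = 0x7 (TCs 0-2
 * enabled); num_tc = 0 is normalized to a single TC with bit_map = 1.
 */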

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
            (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
                return -ENOTSUPP;

        ret = hclge_tm_schd_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_pause_setup_hw(hdev, init);
        if (ret)
                return ret;

        return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
        int ret;

        /* fc_mode is HCLGE_FC_FULL on reset */
        hdev->tm_info.fc_mode = HCLGE_FC_FULL;
        hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

        ret = hclge_tm_schd_info_init(hdev);
        if (ret)
                return ret;

        return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;

        hclge_tm_vport_tc_info_update(vport);

        ret = hclge_vport_q_to_qs_map(hdev, vport);
        if (ret)
                return ret;

        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
                return 0;

        return hclge_tm_bp_setup(hdev);
}