// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PRI    = 0,
        HCLGE_SHAPER_LVL_PG     = 1,
        HCLGE_SHAPER_LVL_PORT   = 2,
        HCLGE_SHAPER_LVL_QSET   = 3,
        HCLGE_SHAPER_LVL_CNT    = 4,
        HCLGE_SHAPER_LVL_VF     = 0,
        HCLGE_SHAPER_LVL_PF     = 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM    3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD    3

#define HCLGE_SHAPER_BS_U_DEF   5
#define HCLGE_SHAPER_BS_S_DEF   20
/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: maximum configurable TM rate
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
                                  struct hclge_shaper_ir_para *ir_para,
                                  u32 max_tm_rate)
{
#define DIVISOR_CLK             (1000 * 8)
#define DIVISOR_IR_B_126        (126 * DIVISOR_CLK)

        static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
                6 * 256,        /* Priority level */
                6 * 32,         /* Priority group level */
                6 * 8,          /* Port level */
                6 * 256         /* Qset level */
        };
        u8 ir_u_calc = 0;
        u8 ir_s_calc = 0;
        u32 ir_calc;
        u32 tick;

        /* Calc tick */
        if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
            ir > max_tm_rate)
                return -EINVAL;

        tick = tick_array[shaper_level];

        /* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
         * the formula is changed to:
         *              126 * 1 * 8
         * ir_calc = ---------------- * 1000
         *              tick * 1
         */
        ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

        if (ir_calc == ir) {
                ir_para->ir_b = 126;
                ir_para->ir_u = 0;
                ir_para->ir_s = 0;

                return 0;
        } else if (ir_calc > ir) {
                /* Increase the denominator to select the ir_s value */
                while (ir_calc >= ir && ir) {
                        ir_s_calc++;
                        ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
                }

                ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
                                (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
        } else {
                /* Increase the numerator to select the ir_u value */
                u32 numerator;

                while (ir_calc < ir) {
                        ir_u_calc++;
                        numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
                        ir_calc = (numerator + (tick >> 1)) / tick;
                }

                if (ir_calc == ir) {
                        ir_para->ir_b = 126;
                } else {
                        u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

                        ir_para->ir_b = (ir * tick + (denominator >> 1)) /
                                        denominator;
                }
        }

        ir_para->ir_u = ir_u_calc;
        ir_para->ir_s = ir_s_calc;

        return 0;
}
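
/* Worked example of the search above (editorial illustration, not driver
 * code): request ir = 1000 Mbps on the port level, where tick = 6 * 8 = 48.
 *
 *   base:  ir_calc = (1008000 + 23) / 48   = 21000 Mbps  (> ir)
 *   ir_s:  1008000 / (48 * (1 << 5))       = 656 Mbps, so ir_s = 5
 *   ir_b:  (1000 * 48 * 32 + 4000) / 8000  = 192
 *
 * Checking against the formula in the function header:
 *   IR = 192 * 2^0 * 8 / (48 * 2^5) * 1000 = 1000 Mbps, as requested.
 */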

static int hclge_pfc_stats_get(struct hclge_dev *hdev,
                               enum hclge_opcode_type opcode, u64 *stats)
{
        struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
        int ret, i, j;

        if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
              opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
                return -EINVAL;

        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
                hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
                desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
        }

        hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

        ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
        if (ret)
                return ret;

        for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
                struct hclge_pfc_stats_cmd *pfc_stats =
                                (struct hclge_pfc_stats_cmd *)desc[i].data;

                for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
                        u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;

                        if (index < HCLGE_MAX_TC_NUM)
                                stats[index] =
                                        le64_to_cpu(pfc_stats->pkt_num[j]);
                }
        }
        return 0;
}
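
/* Note: the three chained descriptors above carry HCLGE_TM_PFC_NUM_GET_PER_CMD
 * counters each, so nine slots cover the HCLGE_MAX_TC_NUM per-TC counters.
 * The stride in the index calculation works because
 * HCLGE_TM_PFC_PKT_GET_CMD_NUM and HCLGE_TM_PFC_NUM_GET_PER_CMD are both 3.
 */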

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

        desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
                (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
                                  u8 pfc_bitmap)
{
        struct hclge_desc desc;
        struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

        pfc->tx_rx_en_bitmap = tx_rx_bitmap;
        pfc->pri_en_bitmap = pfc_bitmap;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}
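
/* Illustrative call (a sketch, not taken from this file): enable PFC in
 * both directions for priorities 0 and 3 only:
 *
 *   hclge_pfc_pause_en_cfg(hdev,
 *                          HCLGE_TX_MAC_PAUSE_EN_MSK |
 *                          HCLGE_RX_MAC_PAUSE_EN_MSK,
 *                          BIT(0) | BIT(3));
 */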

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
                                 u8 pause_trans_gap, u16 pause_trans_time)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

        ether_addr_copy(pause_param->mac_addr, addr);
        ether_addr_copy(pause_param->mac_addr_extra, addr);
        pause_param->pause_trans_gap = pause_trans_gap;
        pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;
        u16 trans_time;
        u8 trans_gap;
        int ret;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        trans_gap = pause_param->pause_trans_gap;
        trans_time = le16_to_cpu(pause_param->pause_trans_time);

        return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
        u8 tc;

        tc = hdev->tm_info.prio_tc[pri_id];

        if (tc >= hdev->tm_info.num_tc)
                return -EINVAL;

        /* The priority register is four bytes wide: the first byte holds
         * priority 0 and priority 1, with the upper 4 bits carrying the TC
         * of priority 1 and the lower 4 bits the TC of priority 0, and so
         * on, as below:
         * first byte:  | pri_1 | pri_0 |
         * second byte: | pri_3 | pri_2 |
         * third byte:  | pri_5 | pri_4 |
         * fourth byte: | pri_7 | pri_6 |
         */
        pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

        return 0;
}
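
/* Example of the nibble packing above: pri_id = 5 mapped to tc = 2 gives
 * pri[2] |= 2 << 4, i.e. TC 2 lands in the high nibble of the third byte,
 * matching the layout sketched in the comment.
 */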

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
        struct hclge_desc desc;
        u8 *pri = (u8 *)desc.data;
        u8 pri_id;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

        for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
        }

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u8 pg_id, u8 pri_bit_map)
{
        struct hclge_pg_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

        map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

        map->pg_id = pg_id;
        map->pri_bit_map = pri_bit_map;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u16 qs_id, u8 pri)
{
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

        map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
        map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
                                    u16 q_id, u16 qs_id)
{
        struct hclge_nq_to_qs_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

        map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

        map->nq_id = cpu_to_le16(q_id);
        map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
                                  u8 dwrr)
{
        struct hclge_pg_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

        weight = (struct hclge_pg_weight_cmd *)desc.data;

        weight->pg_id = pg_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
                                   u8 dwrr)
{
        struct hclge_priority_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

        weight = (struct hclge_priority_weight_cmd *)desc.data;

        weight->pri_id = pri_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
                                  u8 dwrr)
{
        struct hclge_qs_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

        weight = (struct hclge_qs_weight_cmd *)desc.data;

        weight->qs_id = cpu_to_le16(qs_id);
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
                                      u8 bs_b, u8 bs_s)
{
        u32 shapping_para = 0;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shapping_para, BS_S, bs_s);

        return shapping_para;
}
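
/* Illustrative use (a sketch): packing the "full speed" parameter set
 * from hclge_shaper_para_calc() with the default bucket sizes:
 *
 *   u32 para = hclge_tm_get_shapping_para(126, 0, 0,
 *                                         HCLGE_SHAPER_BS_U_DEF,
 *                                         HCLGE_SHAPER_BS_S_DEF);
 */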

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
                                    enum hclge_shap_bucket bucket, u8 pg_id,
                                    u32 shapping_para)
{
        struct hclge_pg_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
                 HCLGE_OPC_TM_PG_C_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

        shap_cfg_cmd->pg_id = pg_id;

        shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_port_shapping_cmd *shap_cfg_cmd;
        struct hclge_shaper_ir_para ir_para;
        struct hclge_desc desc;
        u32 shapping_para;
        int ret;

        ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
                                     &ir_para,
                                     hdev->ae_dev->dev_specs.max_tm_rate);
        if (ret)
                return ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
        shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

        shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
                                                   ir_para.ir_s,
                                                   HCLGE_SHAPER_BS_U_DEF,
                                                   HCLGE_SHAPER_BS_S_DEF);

        shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
                                     enum hclge_shap_bucket bucket, u8 pri_id,
                                     u32 shapping_para)
{
        struct hclge_pri_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
                 HCLGE_OPC_TM_PRI_C_SHAPPING;

        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

        shap_cfg_cmd->pri_id = pri_id;

        shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

        if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pg_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

        if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pri_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

        if (mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(qs_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
                              u32 bit_map)
{
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
                                   false);

        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

        bp_to_qs_map_cmd->tc_id = tc;
        bp_to_qs_map_cmd->qs_group_id = grp_id;
        bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_qs_shapping_cmd *shap_cfg_cmd;
        struct hclge_shaper_ir_para ir_para;
        struct hclge_dev *hdev = vport->back;
        struct hclge_desc desc;
        u32 shaper_para;
        int ret, i;

        if (!max_tx_rate)
                max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

        ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
                                     &ir_para,
                                     hdev->ae_dev->dev_specs.max_tm_rate);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
                                                 ir_para.ir_s,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);

        for (i = 0; i < kinfo->num_tc; i++) {
                hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
                                           false);

                shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
                shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
                shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
                                vport->vport_id, shap_cfg_cmd->qs_id,
                                max_tx_rate, ret);
                        return ret;
                }
        }

        return 0;
}
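
/* Typical use (illustrative): cap every queue set of a vport at 5000 Mbps,
 * or pass 0 to fall back to the unlimited default (max_tm_rate):
 *
 *   ret = hclge_tm_qs_shaper_cfg(vport, 5000);
 *   ret = hclge_tm_qs_shaper_cfg(vport, 0);
 */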

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u16 max_rss_size;
        u8 i;

        /* TC configuration is shared by PF/VF on one port; for simplicity,
         * a VF is allowed only one TC. A VF's vport_id is non-zero.
         */
        kinfo->num_tc = vport->vport_id ? 1 :
                        min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
        vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
                                (vport->vport_id ? (vport->vport_id - 1) : 0);

        max_rss_size = min_t(u16, hdev->rss_size_max,
                             vport->alloc_tqps / kinfo->num_tc);

        /* Set to user value, no larger than max_rss_size. */
        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size) {
                dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
                         kinfo->rss_size, kinfo->req_rss_size);
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
                /* If the user has not set rss_size, compare it against the
                 * number of valid MSI vectors so that TQPs and IRQs map
                 * one-to-one by default.
                 */
                if (!kinfo->req_rss_size)
                        max_rss_size = min_t(u16, max_rss_size,
                                             (hdev->num_nic_msi - 1) /
                                             kinfo->num_tc);

                /* Set to the maximum specification value (max_rss_size). */
                kinfo->rss_size = max_rss_size;
        }

        kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
        vport->dwrr = 100;  /* 100 percent as init */
        vport->alloc_rss_size = kinfo->rss_size;
        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
                        kinfo->tc_info[i].enable = true;
                        kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
                        kinfo->tc_info[i].tqp_count = kinfo->rss_size;
                        kinfo->tc_info[i].tc = i;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info[i].enable = false;
                        kinfo->tc_info[i].tqp_offset = 0;
                        kinfo->tc_info[i].tqp_count = 1;
                        kinfo->tc_info[i].tc = 0;
                }
        }

        memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
               sizeof_field(struct hnae3_knic_private_info, prio_tc));
}
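
/* The qs_offset rule above gives the PF (vport_id 0) queue sets from 0
 * upwards, while VF n (vport_id n >= 1) starts at HNAE3_MAX_TC + (n - 1),
 * e.g. with HNAE3_MAX_TC = 8: PF -> 0, VF1 -> 8, VF2 -> 9.
 */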

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_tm_vport_tc_info_update(vport);

                vport++;
        }
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
                hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
                hdev->tm_info.tc_info[i].pgid = 0;
                hdev->tm_info.tc_info[i].bw_limit =
                        hdev->tm_info.pg_info[0].bw_limit;
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;

        /* DCB is enabled if we have more than 1 TC or pfc_en is
         * non-zero.
         */
        if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT      100

        u8 i;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                int k;

                hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

                hdev->tm_info.pg_info[i].pg_id = i;
                hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

                hdev->tm_info.pg_info[i].bw_limit =
                                        hdev->ae_dev->dev_specs.max_tm_rate;

                if (i != 0)
                        continue;

                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
        }
}
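
/* Note that only PG 0 is populated: it owns the full hw_tc_map, 100%
 * DWRR weight (BW_PERCENT) and per-TC weights, while any further
 * priority groups are left with zero weight and no TCs.
 */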

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
                                 "DCB is disabled, but last mode is FC_PFC\n");

                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
                /* fc_mode_last_time records the last fc_mode when DCB is
                 * enabled, so that fc_mode can be restored to the correct
                 * value when DCB is disabled.
                 */
                hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
                hdev->tm_info.fc_mode = HCLGE_FC_PFC;
        }
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
        hclge_tm_pg_info_init(hdev);

        hclge_tm_tc_info_init(hdev);

        hclge_tm_vport_info_update(hdev);

        hclge_pfc_info_init(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg mapping */
                ret = hclge_tm_pg_to_pri_map_cfg(
                        hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
        u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
        struct hclge_shaper_ir_para ir_para;
        u32 shaper_para;
        int ret;
        u32 i;

        /* Cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Pg to pri */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Calc shaper para */
                ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit,
                                             HCLGE_SHAPER_LVL_PG,
                                             &ir_para, max_tm_rate);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_C_BUCKET, i,
                                               shaper_para);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
                                                         ir_para.ir_u,
                                                         ir_para.ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_P_BUCKET, i,
                                               shaper_para);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        /* cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* pg to prio */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg dwrr */
                ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
                                   struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_queue **tqp = kinfo->tqp;
        struct hnae3_tc_info *v_tc_info;
        u32 i, j;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                v_tc_info = &kinfo->tc_info[i];
                for (j = 0; j < v_tc_info->tqp_count; j++) {
                        struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

                        ret = hclge_tm_q_to_qs_map_cfg(hdev,
                                                       hclge_get_queue_id(q),
                                                       vport->qs_offset + i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, one by one mapping */
                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hnae3_knic_private_info *kinfo =
                                &vport[k].nic.kinfo;

                        for (i = 0; i < kinfo->num_tc; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, i);
                                if (ret)
                                        return ret;
                        }
                }
        } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
                /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
                for (k = 0; k < hdev->num_alloc_vport; k++)
                        for (i = 0; i < HNAE3_MAX_TC; i++) {
                                ret = hclge_tm_qs_to_pri_map_cfg(
                                        hdev, vport[k].qs_offset + i, k);
                                if (ret)
                                        return ret;
                        }
        } else {
                return -EINVAL;
        }

        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
        u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
        struct hclge_shaper_ir_para ir_para;
        u32 shaper_para;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
                                             HCLGE_SHAPER_LVL_PRI,
                                             &ir_para, max_tm_rate);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
                                                         ir_para.ir_u,
                                                         ir_para.ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
                                                shaper_para);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        struct hclge_shaper_ir_para ir_para;
        u32 shaper_para;
        int ret;

        ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
                                     &ir_para,
                                     hdev->ae_dev->dev_specs.max_tm_rate);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
                                        vport->vport_id, shaper_para);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
                                                 ir_para.ir_s,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
                                        vport->vport_id, shaper_para);
        if (ret)
                return ret;

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
        struct hclge_shaper_ir_para ir_para;
        u32 i;
        int ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
                                             HCLGE_SHAPER_LVL_QSET,
                                             &ir_para, max_tm_rate);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        /* Need config vport shaper */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        struct hclge_pg_info *pg_info;
        u8 dwrr;
        int ret;
        u32 i, k;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                dwrr = pg_info->tc_dwrr[i];

                ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
                if (ret)
                        return ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        ret = hclge_tm_qs_weight_cfg(
                                hdev, vport[k].qs_offset + i,
                                vport[k].dwrr);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT       1
#define DEFAULT_TC_OFFSET       14

        struct hclge_ets_tc_weight_cmd *ets_weight;
        struct hclge_desc desc;
        unsigned int i;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
        ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                struct hclge_pg_info *pg_info;

                ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

                if (!(hdev->hw_tc_map & BIT(i)))
                        continue;

                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
        }

        ets_weight->weight_offset = DEFAULT_TC_OFFSET;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        /* Vf dwrr */
        ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
        if (ret)
                return ret;

        /* Qset dwrr */
        for (i = 0; i < kinfo->num_tc; i++) {
                ret = hclge_tm_qs_weight_cfg(
                        hdev, vport->qs_offset + i,
                        hdev->tm_info.pg_info[0].tc_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;

                if (!hnae3_dev_dcb_supported(hdev))
                        return 0;

                ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
                if (ret == -EOPNOTSUPP) {
                        dev_warn(&hdev->pdev->dev,
                                 "fw %08x doesn't support ets tc weight cmd\n",
                                 hdev->fw_version);
                        ret = 0;
                }

                return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_up_to_tc_map(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_to_pri_map(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_port_shaper_cfg(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_shaper_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_dwrr_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
        int ret;
        u8 i;

        /* Only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        if (vport->vport_id >= HNAE3_MAX_TC)
                return -EINVAL;

        ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
        if (ret)
                return ret;

        for (i = 0; i < kinfo->num_tc; i++) {
                u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

                ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
                                                sch_mode);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u8 i, k;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                for (i = 0; i < hdev->tm_info.num_tc; i++) {
                        ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
                        if (ret)
                                return ret;

                        for (k = 0; k < hdev->num_alloc_vport; k++) {
                                ret = hclge_tm_qs_schd_mode_cfg(
                                        hdev, vport[k].qs_offset + i,
                                        HCLGE_SCH_MODE_DWRR);
                                if (ret)
                                        return ret;
                        }
                }
        } else {
                for (i = 0; i < hdev->num_alloc_vport; i++) {
                        ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
                        if (ret)
                                return ret;

                        vport++;
                }
        }

        return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
        int ret;

        /* Cfg tm mapping */
        ret = hclge_tm_map_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg tm shaper */
        ret = hclge_tm_shaper_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg dwrr */
        ret = hclge_tm_dwrr_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg schd mode for each level schd */
        return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        return hclge_pause_param_cfg(hdev, mac->mac_addr,
                                     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
                                     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
        u8 enable_bitmap = 0;

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
                enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
                                HCLGE_RX_MAC_PAUSE_EN_MSK;

        return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
                                      hdev->tm_info.pfc_en);
}

/* Each TC has 1024 queue sets for backpressure; they are divided into
 * 32 groups of 32 queue sets each, so each group can be represented
 * by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
        int i;

        for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
                u32 qs_bitmap = 0;
                int k, ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hclge_vport *vport = &hdev->vport[k];
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;

                        grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
                                              HCLGE_BP_GRP_ID_S);
                        sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
                                                  HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
                }

                ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
                if (ret)
                        return ret;
        }

        return 0;
}
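
/* Example (assuming the GRP/SUB_GRP fields select qs_id bits [9:5] and
 * [4:0] respectively, per the 32 x 32 layout described above): qs_id = 70
 * yields grp = 2 and sub_grp = 6, so bit 6 is set in the bitmap written
 * for group 2.
 */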

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
        bool tx_en, rx_en;

        switch (hdev->tm_info.fc_mode) {
        case HCLGE_FC_NONE:
                tx_en = false;
                rx_en = false;
                break;
        case HCLGE_FC_RX_PAUSE:
                tx_en = false;
                rx_en = true;
                break;
        case HCLGE_FC_TX_PAUSE:
                tx_en = true;
                rx_en = false;
                break;
        case HCLGE_FC_FULL:
                tx_en = true;
                rx_en = true;
                break;
        case HCLGE_FC_PFC:
                tx_en = false;
                rx_en = false;
                break;
        default:
                tx_en = true;
                rx_en = true;
        }

        return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
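
/* Summary of the mapping above:
 *
 *   fc_mode            tx_en  rx_en
 *   HCLGE_FC_NONE      false  false
 *   HCLGE_FC_RX_PAUSE  false  true
 *   HCLGE_FC_TX_PAUSE  true   false
 *   HCLGE_FC_FULL      true   true
 *   HCLGE_FC_PFC       false  false  (per-priority PFC replaces MAC pause)
 *   default            true   true
 */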

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
        int ret;
        int i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_bp_setup_hw(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        ret = hclge_pause_param_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_mac_pause_setup_hw(hdev);
        if (ret)
                return ret;

        /* Only DCB-capable devices support qset back pressure and the PFC
         * command.
         */
        if (!hnae3_dev_dcb_supported(hdev))
                return 0;

        /* The GE MAC does not support PFC. When the driver is initializing
         * and the MAC is in GE mode, ignore the error here; otherwise
         * initialization would fail.
         */
        ret = hclge_pfc_setup_hw(hdev);
        if (init && ret == -EOPNOTSUPP) {
                dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
        } else if (ret) {
                dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
                        ret);
                return ret;
        }

        return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
        struct hclge_vport *vport = hdev->vport;
        struct hnae3_knic_private_info *kinfo;
        u32 i, k;

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                hdev->tm_info.prio_tc[i] = prio_tc[i];

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        kinfo = &vport[k].nic.kinfo;
                        kinfo->prio_tc[i] = prio_tc[i];
                }
        }
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
        u8 bit_map = 0;
        u8 i;

        hdev->tm_info.num_tc = num_tc;

        for (i = 0; i < hdev->tm_info.num_tc; i++)
                bit_map |= BIT(i);

        if (!bit_map) {
                bit_map = 1;
                hdev->tm_info.num_tc = 1;
        }

        hdev->hw_tc_map = bit_map;

        hclge_tm_schd_info_init(hdev);
}
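
/* Example: num_tc = 4 yields hw_tc_map = 0x0F; num_tc = 0 is coerced to
 * a single TC with hw_tc_map = 0x01.
 */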

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
        /* DCB is enabled if we have more than 1 TC or pfc_en is
         * non-zero.
         */
        if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

        hclge_pfc_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
            (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
                return -ENOTSUPP;

        ret = hclge_tm_schd_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_pause_setup_hw(hdev, init);
        if (ret)
                return ret;

        return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
        /* fc_mode is HCLGE_FC_FULL on reset */
        hdev->tm_info.fc_mode = HCLGE_FC_FULL;
        hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
            hdev->tm_info.num_pg != 1)
                return -EINVAL;

        hclge_tm_schd_info_init(hdev);

        return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;

        hclge_tm_vport_tc_info_update(vport);

        ret = hclge_vport_q_to_qs_map(hdev, vport);
        if (ret)
                return ret;

        if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
                return 0;

        return hclge_tm_bp_setup(hdev);
}