drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
        HCLGE_SHAPER_LVL_PRI    = 0,
        HCLGE_SHAPER_LVL_PG     = 1,
        HCLGE_SHAPER_LVL_PORT   = 2,
        HCLGE_SHAPER_LVL_QSET   = 3,
        HCLGE_SHAPER_LVL_CNT    = 4,
        HCLGE_SHAPER_LVL_VF     = 0,
        HCLGE_SHAPER_LVL_PF     = 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM    3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD    3

#define HCLGE_SHAPER_BS_U_DEF   5
#define HCLGE_SHAPER_BS_S_DEF   20

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: max tm rate available to be configured
 *
 * the formula:
 *
 *              IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *              Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
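/* A worked example of the branch that raises ir_s (derived from the
 * formula above, not from any hardware manual): to configure
 * ir = 5000 Mbps at port level (tick = 6 * 8 = 48), the default rate
 * of 21000 Mbps is too high, so ir_s is increased until the rate
 * drops below the target:
 *   ir_s = 3: 126 * 8 * 1000 / (48 * 2^3) = 2625 Mbps < 5000 Mbps
 * then ir_b is recomputed to hit the target:
 *   ir_b = 5000 * 48 * 2^3 / (8 * 1000) = 240 (after rounding)
 * which gives IR = 240 * 2^0 * 8 / (48 * 2^3) * 1000 = 5000 Mbps.
 */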
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
                                  struct hclge_shaper_ir_para *ir_para,
                                  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B     126
#define DIVISOR_CLK             (1000 * 8)
#define DEFAULT_DIVISOR_IR_B    (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

        static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
                6 * 256,        /* Priority level */
                6 * 32,         /* Priority group level */
                6 * 8,          /* Port level */
                6 * 256         /* Qset level */
        };
        u8 ir_u_calc = 0;
        u8 ir_s_calc = 0;
        u32 ir_calc;
        u32 tick;

        /* Calc tick */
        if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
            ir > max_tm_rate)
                return -EINVAL;

        tick = tick_array[shaper_level];

        /* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
         * the formula is changed to:
         *              126 * 1 * 8
         * ir_calc = ---------------- * 1000
         *              tick * 1
         */
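        /* e.g. at port level (tick = 6 * 8 = 48) this default rate works
         * out to 126 * 8 * 1000 / 48 = 21000 Mbps
         */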
        ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

        if (ir_calc == ir) {
                ir_para->ir_b = DEFAULT_SHAPER_IR_B;
                ir_para->ir_u = 0;
                ir_para->ir_s = 0;

                return 0;
        } else if (ir_calc > ir) {
                /* Increasing the denominator to select ir_s value */
                while (ir_calc >= ir && ir) {
                        ir_s_calc++;
                        ir_calc = DEFAULT_DIVISOR_IR_B /
                                  (tick * (1 << ir_s_calc));
                }

                ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
                                (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
        } else {
                /* Increasing the numerator to select ir_u value */
                u32 numerator;

                while (ir_calc < ir) {
                        ir_u_calc++;
                        numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
                        ir_calc = (numerator + (tick >> 1)) / tick;
                }

                if (ir_calc == ir) {
                        ir_para->ir_b = DEFAULT_SHAPER_IR_B;
                } else {
                        u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
                        ir_para->ir_b = (ir * tick + (denominator >> 1)) /
                                        denominator;
                }
        }

        ir_para->ir_u = ir_u_calc;
        ir_para->ir_s = ir_s_calc;

        return 0;
}

static const u16 hclge_pfc_tx_stats_offset[] = {
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
};

static const u16 hclge_pfc_rx_stats_offset[] = {
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
        HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
};

static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
{
        const u16 *offset;
        int i;

        if (tx)
                offset = hclge_pfc_tx_stats_offset;
        else
                offset = hclge_pfc_rx_stats_offset;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
                stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}

void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        hclge_pfc_stats_get(hdev, false, stats);
}

void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
        hclge_pfc_stats_get(hdev, true, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

        desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
                (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
                                  u8 pfc_bitmap)
{
        struct hclge_desc desc;
        struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

        pfc->tx_rx_en_bitmap = tx_rx_bitmap;
        pfc->pri_en_bitmap = pfc_bitmap;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
                                 u8 pause_trans_gap, u16 pause_trans_time)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

        ether_addr_copy(pause_param->mac_addr, addr);
        ether_addr_copy(pause_param->mac_addr_extra, addr);
        pause_param->pause_trans_gap = pause_trans_gap;
        pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
        struct hclge_cfg_pause_param_cmd *pause_param;
        struct hclge_desc desc;
        u16 trans_time;
        u8 trans_gap;
        int ret;

        pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
                return ret;

        trans_gap = pause_param->pause_trans_gap;
        trans_time = le16_to_cpu(pause_param->pause_trans_time);

        return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
        u8 tc;

        tc = hdev->tm_info.prio_tc[pri_id];

        if (tc >= hdev->tm_info.num_tc)
                return -EINVAL;

        /* The register for priority has four bytes: the first byte covers
         * priority 0 and priority 1, where the higher 4 bits stand for
         * priority 1 and the lower 4 bits for priority 0, as below:
         * first byte:  | pri_1 | pri_0 |
         * second byte: | pri_3 | pri_2 |
         * third byte:  | pri_5 | pri_4 |
         * fourth byte: | pri_7 | pri_6 |
         */
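        /* e.g. mapping priority 5 to tc 3 sets the high nibble of the
         * third byte: pri[5 >> 1] |= 3 << ((5 & 1) * 4), i.e. pri[2] |= 0x30
         */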
        pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

        return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
        struct hclge_desc desc;
        u8 *pri = (u8 *)desc.data;
        u8 pri_id;
        int ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

        for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
                ret = hclge_fill_pri_array(hdev, pri, pri_id);
                if (ret)
                        return ret;
        }

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u8 pg_id, u8 pri_bit_map)
{
        struct hclge_pg_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

        map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

        map->pg_id = pg_id;
        map->pri_bit_map = pri_bit_map;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
                                      u16 qs_id, u8 pri)
{
        struct hclge_qs_to_pri_link_cmd *map;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

        map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

        map->qs_id = cpu_to_le16(qs_id);
        map->priority = pri;
        map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
                                    u16 q_id, u16 qs_id)
{
        struct hclge_nq_to_qs_link_cmd *map;
        struct hclge_desc desc;
        u16 qs_id_l;
        u16 qs_id_h;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

        map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

        map->nq_id = cpu_to_le16(q_id);

        /* convert qs_id to the following format to support qset_id >= 1024
         * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
         *            /         / \         \
         *           /         /   \         \
         * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
         *          | qs_id_h | vld | qs_id_l |
         */
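        /* e.g. (assuming the field masks match the diagram above, i.e.
         * qs_id_l covers bits 9~0 and qs_id_h covers bits 14~10):
         * qs_id 1025 (0x401) splits into qs_id_l = 1 and qs_id_h = 1,
         * so the rewritten id becomes (1 << 11) | 1 = 0x801; the vld
         * bit is then OR'ed in below, giving 0xc01 if the link-valid
         * mask is bit 10
         */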
        qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
                                  HCLGE_TM_QS_ID_L_S);
        qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
                                  HCLGE_TM_QS_ID_H_S);
        hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
                        qs_id_l);
        hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
                        qs_id_h);
        map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
                                  u8 dwrr)
{
        struct hclge_pg_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

        weight = (struct hclge_pg_weight_cmd *)desc.data;

        weight->pg_id = pg_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
                                   u8 dwrr)
{
        struct hclge_priority_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

        weight = (struct hclge_priority_weight_cmd *)desc.data;

        weight->pri_id = pri_id;
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
                                  u8 dwrr)
{
        struct hclge_qs_weight_cmd *weight;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

        weight = (struct hclge_qs_weight_cmd *)desc.data;

        weight->qs_id = cpu_to_le16(qs_id);
        weight->dwrr = dwrr;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
                                      u8 bs_b, u8 bs_s)
{
        u32 shapping_para = 0;

        hclge_tm_set_field(shapping_para, IR_B, ir_b);
        hclge_tm_set_field(shapping_para, IR_U, ir_u);
        hclge_tm_set_field(shapping_para, IR_S, ir_s);
        hclge_tm_set_field(shapping_para, BS_B, bs_b);
        hclge_tm_set_field(shapping_para, BS_S, bs_s);

        return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
                                    enum hclge_shap_bucket bucket, u8 pg_id,
                                    u32 shapping_para, u32 rate)
{
        struct hclge_pg_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
                 HCLGE_OPC_TM_PG_C_SHAPPING;
        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

        shap_cfg_cmd->pg_id = pg_id;

        shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

        hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

        shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_port_shapping_cmd *shap_cfg_cmd;
        struct hclge_shaper_ir_para ir_para;
        struct hclge_desc desc;
        u32 shapping_para;
        int ret;

        ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
                                     &ir_para,
                                     hdev->ae_dev->dev_specs.max_tm_rate);
        if (ret)
                return ret;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
        shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

        shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
                                                   ir_para.ir_s,
                                                   HCLGE_SHAPER_BS_U_DEF,
                                                   HCLGE_SHAPER_BS_S_DEF);

        shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

        hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

        shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
                                     enum hclge_shap_bucket bucket, u8 pri_id,
                                     u32 shapping_para, u32 rate)
{
        struct hclge_pri_shapping_cmd *shap_cfg_cmd;
        enum hclge_opcode_type opcode;
        struct hclge_desc desc;

        opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
                 HCLGE_OPC_TM_PRI_C_SHAPPING;

        hclge_cmd_setup_basic_desc(&desc, opcode, false);

        shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

        shap_cfg_cmd->pri_id = pri_id;

        shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

        hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

        shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

        if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pg_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

        if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(pri_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

        if (mode == HCLGE_SCH_MODE_DWRR)
                desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
        else
                desc.data[1] = 0;

        desc.data[0] = cpu_to_le32(qs_id);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
                              u32 bit_map)
{
        struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
        struct hclge_desc desc;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
                                   false);

        bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

        bp_to_qs_map_cmd->tc_id = tc;
        bp_to_qs_map_cmd->qs_group_id = grp_id;
        bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_qs_shapping_cmd *shap_cfg_cmd;
        struct hclge_shaper_ir_para ir_para;
        struct hclge_dev *hdev = vport->back;
        struct hclge_desc desc;
        u32 shaper_para;
        int ret, i;

        if (!max_tx_rate)
                max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

        ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
                                     &ir_para,
                                     hdev->ae_dev->dev_specs.max_tm_rate);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
                                                 ir_para.ir_s,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);

        for (i = 0; i < kinfo->tc_info.num_tc; i++) {
                hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
                                           false);

                shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
                shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
                shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

                hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
                shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

                ret = hclge_cmd_send(&hdev->hw, &desc, 1);
                if (ret) {
                        dev_err(&hdev->pdev->dev,
                                "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
                                vport->vport_id, shap_cfg_cmd->qs_id,
                                max_tx_rate, ret);
                        return ret;
                }
        }

        return 0;
}

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_tc_info *tc_info = &kinfo->tc_info;
        struct hclge_dev *hdev = vport->back;
        u16 max_rss_size = 0;
        int i;

        if (!tc_info->mqprio_active)
                return vport->alloc_tqps / tc_info->num_tc;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
                        continue;
                if (max_rss_size < tc_info->tqp_count[i])
                        max_rss_size = tc_info->tqp_count[i];
        }

        return max_rss_size;
}

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_tc_info *tc_info = &kinfo->tc_info;
        struct hclge_dev *hdev = vport->back;
        int sum = 0;
        int i;

        if (!tc_info->mqprio_active)
                return kinfo->rss_size * tc_info->num_tc;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
                        sum += tc_info->tqp_count[i];
        }

        return sum;
}

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u16 vport_max_rss_size;
        u16 max_rss_size;

        /* TC configuration is shared by PF/VF in one port, so only one TC
         * is allowed for each VF for simplicity. A VF's vport_id is non-zero.
         */
        if (vport->vport_id) {
                kinfo->tc_info.num_tc = 1;
                vport->qs_offset = HNAE3_MAX_TC +
                                   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
                vport_max_rss_size = hdev->vf_rss_size_max;
        } else {
                kinfo->tc_info.num_tc =
                        min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
                vport->qs_offset = 0;
                vport_max_rss_size = hdev->pf_rss_size_max;
        }
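        /* e.g. assuming HNAE3_MAX_TC is 8 and HCLGE_VF_VPORT_START_NUM is 1,
         * the PF keeps qsets 0~7 for its own TCs while VF 1 gets
         * qs_offset 8, VF 2 gets qs_offset 9, and so on
         */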

        max_rss_size = min_t(u16, vport_max_rss_size,
                             hclge_vport_get_max_rss_size(vport));

        /* Set to user value, no larger than max_rss_size. */
        if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
            kinfo->req_rss_size <= max_rss_size) {
                dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
                         kinfo->rss_size, kinfo->req_rss_size);
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
                /* Set to the maximum specification value (max_rss_size). */
                kinfo->rss_size = max_rss_size;
        }
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u8 i;

        hclge_tm_update_kinfo_rss_size(vport);
        kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
        vport->dwrr = 100;  /* 100 percent as init */
        vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
        hdev->rss_cfg.rss_size = kinfo->rss_size;

        /* when mqprio is enabled, tc_info has already been updated */
        if (kinfo->tc_info.mqprio_active)
                return;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
                        kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
                        kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
                } else {
                        /* Set to default queue if TC is disabled */
                        kinfo->tc_info.tqp_offset[i] = 0;
                        kinfo->tc_info.tqp_count[i] = 1;
                }
        }

        memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
               sizeof_field(struct hnae3_tc_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                hclge_tm_vport_tc_info_update(vport);

                vport++;
        }
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
        u8 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                hdev->tm_info.tc_info[i].tc_id = i;
                hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
                hdev->tm_info.tc_info[i].pgid = 0;
                hdev->tm_info.tc_info[i].bw_limit =
                        hdev->tm_info.pg_info[0].bw_limit;
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] =
                        (i >= hdev->tm_info.num_tc) ? 0 : i;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT      100

        u8 i;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                int k;

                hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

                hdev->tm_info.pg_info[i].pg_id = i;
                hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

                hdev->tm_info.pg_info[i].bw_limit =
                                        hdev->ae_dev->dev_specs.max_tm_rate;

                if (i != 0)
                        continue;

                hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
                for (k = 0; k < hdev->tm_info.num_tc; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
                for (; k < HNAE3_MAX_TC; k++)
                        hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
        }
}

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
        if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
                if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
                        dev_warn(&hdev->pdev->dev,
                                 "Only 1 tc used, but last mode is FC_PFC\n");

                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
        } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
                /* fc_mode_last_time records the last fc_mode when
                 * DCB is enabled, so that fc_mode can be restored to
                 * the correct value when DCB is disabled.
                 */
                hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
                hdev->tm_info.fc_mode = HCLGE_FC_PFC;
        }
}

static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
        if (!hdev->tm_info.pfc_en) {
                hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
                return;
        }

        if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
                hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
                hdev->tm_info.fc_mode = HCLGE_FC_PFC;
        }
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
        if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
                hclge_update_fc_mode(hdev);
        else
                hclge_update_fc_mode_by_dcb_flag(hdev);
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
        hclge_tm_pg_info_init(hdev);

        hclge_tm_tc_info_init(hdev);

        hclge_tm_vport_info_update(hdev);

        hclge_tm_pfc_info_update(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg mapping */
                ret = hclge_tm_pg_to_pri_map_cfg(
                        hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
        u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
        struct hclge_shaper_ir_para ir_para;
        u32 shaper_para;
        int ret;
        u32 i;

        /* Cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* Pg to pri */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                u32 rate = hdev->tm_info.pg_info[i].bw_limit;

                /* Calc shaper para */
                ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
                                             &ir_para, max_tm_rate);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_C_BUCKET, i,
                                               shaper_para, rate);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
                                                         ir_para.ir_u,
                                                         ir_para.ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pg_shapping_cfg(hdev,
                                               HCLGE_TM_SHAP_P_BUCKET, i,
                                               shaper_para, rate);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;
        u32 i;

        /* cfg pg schd */
        if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
                return 0;

        /* pg to prio */
        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                /* Cfg dwrr */
                ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
                                   struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hnae3_tc_info *tc_info = &kinfo->tc_info;
        struct hnae3_queue **tqp = kinfo->tqp;
        u32 i, j;
        int ret;

        for (i = 0; i < tc_info->num_tc; i++) {
                for (j = 0; j < tc_info->tqp_count[i]; j++) {
                        struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

                        ret = hclge_tm_q_to_qs_map_cfg(hdev,
                                                       hclge_get_queue_id(q),
                                                       vport->qs_offset + i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u16 i, k;
        int ret;

        /* Cfg qs -> pri mapping, one by one mapping */
        for (k = 0; k < hdev->num_alloc_vport; k++) {
                struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

                for (i = 0; i < kinfo->tc_info.num_tc; i++) {
                        ret = hclge_tm_qs_to_pri_map_cfg(hdev,
                                                         vport[k].qs_offset + i,
                                                         i);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        u16 i, k;
        int ret;

        /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
        for (k = 0; k < hdev->num_alloc_vport; k++)
                for (i = 0; i < HNAE3_MAX_TC; i++) {
                        ret = hclge_tm_qs_to_pri_map_cfg(hdev,
                                                         vport[k].qs_offset + i,
                                                         k);
                        if (ret)
                                return ret;
                }

        return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
                ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
        else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
                ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
        else
                return -EINVAL;

        if (ret)
                return ret;

        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
        u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
        struct hclge_shaper_ir_para ir_para;
        u32 shaper_para;
        int ret;
        u32 i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                u32 rate = hdev->tm_info.tc_info[i].bw_limit;

                ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
                                             &ir_para, max_tm_rate);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
                                                shaper_para, rate);
                if (ret)
                        return ret;

                shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
                                                         ir_para.ir_u,
                                                         ir_para.ir_s,
                                                         HCLGE_SHAPER_BS_U_DEF,
                                                         HCLGE_SHAPER_BS_S_DEF);
                ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
                                                shaper_para, rate);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
        struct hclge_dev *hdev = vport->back;
        struct hclge_shaper_ir_para ir_para;
        u32 shaper_para;
        int ret;

        ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
                                     &ir_para,
                                     hdev->ae_dev->dev_specs.max_tm_rate);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
                                        vport->vport_id, shaper_para,
                                        vport->bw_limit);
        if (ret)
                return ret;

        shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
                                                 ir_para.ir_s,
                                                 HCLGE_SHAPER_BS_U_DEF,
                                                 HCLGE_SHAPER_BS_S_DEF);
        ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
                                        vport->vport_id, shaper_para,
                                        vport->bw_limit);
        if (ret)
                return ret;

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
        struct hclge_shaper_ir_para ir_para;
        u32 i;
        int ret;

        for (i = 0; i < kinfo->tc_info.num_tc; i++) {
                ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
                                             HCLGE_SHAPER_LVL_QSET,
                                             &ir_para, max_tm_rate);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        /* Need to config the shaper for each vport */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
                if (ret)
                        return ret;

                ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        struct hclge_pg_info *pg_info;
        u8 dwrr;
        int ret;
        u32 i, k;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                pg_info =
                        &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                dwrr = pg_info->tc_dwrr[i];

                ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
                if (ret)
                        return ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        ret = hclge_tm_qs_weight_cfg(
                                hdev, vport[k].qs_offset + i,
                                vport[k].dwrr);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_OFFSET       14

        struct hclge_ets_tc_weight_cmd *ets_weight;
        struct hclge_desc desc;
        unsigned int i;

        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
        ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                struct hclge_pg_info *pg_info;

                pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
                ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
        }

        ets_weight->weight_offset = DEFAULT_TC_OFFSET;

        return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        /* Vf dwrr */
        ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
        if (ret)
                return ret;

        /* Qset dwrr */
        for (i = 0; i < kinfo->tc_info.num_tc; i++) {
                ret = hclge_tm_qs_weight_cfg(
                        hdev, vport->qs_offset + i,
                        hdev->tm_info.pg_info[0].tc_dwrr[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u32 i;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
                if (ret)
                        return ret;

                vport++;
        }

        return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;

                if (!hnae3_dev_dcb_supported(hdev))
                        return 0;

                ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
                if (ret == -EOPNOTSUPP) {
                        dev_warn(&hdev->pdev->dev,
                                 "fw %08x doesn't support ets tc weight cmd\n",
                                 hdev->fw_version);
                        ret = 0;
                }

                return ret;
        } else {
                ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_up_to_tc_map(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_to_pri_map(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_port_shaper_cfg(hdev);
        if (ret)
                return ret;

        ret = hclge_tm_pg_shaper_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_pg_dwrr_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
        int ret;
        u8 i;

        /* Only configured in TC-based scheduler mode */
        if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
                return 0;

        for (i = 0; i < hdev->tm_info.num_pg; i++) {
                ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u16 i;

        ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
        if (ret)
                return ret;

        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_tm_qs_schd_mode_cfg(hdev,
                                                vport[i].qs_offset + pri_id,
                                                HCLGE_SCH_MODE_DWRR);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
        struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        if (vport->vport_id >= HNAE3_MAX_TC)
                return -EINVAL;

        ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
        if (ret)
                return ret;

        for (i = 0; i < kinfo->tc_info.num_tc; i++) {
                u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

                ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
                                                sch_mode);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        int ret;
        u8 i;

        if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
                for (i = 0; i < hdev->tm_info.num_tc; i++) {
                        ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
                        if (ret)
                                return ret;
                }
        } else {
                for (i = 0; i < hdev->num_alloc_vport; i++) {
                        ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
                        if (ret)
                                return ret;

                        vport++;
                }
        }

        return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
        if (ret)
                return ret;

        return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
        int ret;

        /* Cfg tm mapping */
        ret = hclge_tm_map_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg tm shaper */
        ret = hclge_tm_shaper_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg dwrr */
        ret = hclge_tm_dwrr_cfg(hdev);
        if (ret)
                return ret;

        /* Cfg schd mode for each level schd */
        return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
        struct hclge_mac *mac = &hdev->hw.mac;

        return hclge_pause_param_cfg(hdev, mac->mac_addr,
                                     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
                                     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
        u8 enable_bitmap = 0;

        if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
                enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
                                HCLGE_RX_MAC_PAUSE_EN_MSK;

        return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
                                      hdev->tm_info.pfc_en);
}

/* For the queues used for backpressure, divide them into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
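/* e.g. assuming the group fields split qs_id as qs_id / 32 and qs_id % 32,
 * qs_id 70 falls in group 2 (70 / 32) at sub-group bit 6 (70 % 32), so
 * group 2's bitmap gets BIT(6) set
 */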
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
        u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
        u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
        u8 grp_num = HCLGE_BP_GRP_NUM;
        int i;

        if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
                grp_num = HCLGE_BP_EXT_GRP_NUM;
                grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
                grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
        }

        for (i = 0; i < grp_num; i++) {
                u32 qs_bitmap = 0;
                int k, ret;

                for (k = 0; k < hdev->num_alloc_vport; k++) {
                        struct hclge_vport *vport = &hdev->vport[k];
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;

                        grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
                        sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
                                                  HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
                }

                ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
                if (ret)
                        return ret;
        }

        return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
        bool tx_en, rx_en;

        switch (hdev->tm_info.fc_mode) {
        case HCLGE_FC_NONE:
                tx_en = false;
                rx_en = false;
                break;
        case HCLGE_FC_RX_PAUSE:
                tx_en = false;
                rx_en = true;
                break;
        case HCLGE_FC_TX_PAUSE:
                tx_en = true;
                rx_en = false;
                break;
        case HCLGE_FC_FULL:
                tx_en = true;
                rx_en = true;
                break;
        case HCLGE_FC_PFC:
                tx_en = false;
                rx_en = false;
                break;
        default:
                tx_en = true;
                rx_en = true;
        }

        return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
        int ret;
        int i;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                ret = hclge_bp_setup_hw(hdev, i);
                if (ret)
                        return ret;
        }

        return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
        int ret;

        ret = hclge_pause_param_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_mac_pause_setup_hw(hdev);
        if (ret)
                return ret;

        /* Only DCB-supported dev supports qset back pressure and pfc cmd */
        if (!hnae3_dev_dcb_supported(hdev))
                return 0;

        /* GE MAC does not support PFC, when driver is initializing and MAC
         * is in GE Mode, ignore the error here, otherwise initialization
         * will fail.
         */
        ret = hclge_pfc_setup_hw(hdev);
        if (init && ret == -EOPNOTSUPP)
                dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
        else if (ret) {
                dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
                        ret);
                return ret;
        }

        return hclge_tm_bp_setup(hdev);
}

1548 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1549 {
1550         struct hclge_vport *vport = hdev->vport;
1551         struct hnae3_knic_private_info *kinfo;
1552         u32 i, k;
1553
1554         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1555                 hdev->tm_info.prio_tc[i] = prio_tc[i];
1556
1557                 for (k = 0;  k < hdev->num_alloc_vport; k++) {
1558                         kinfo = &vport[k].nic.kinfo;
1559                         kinfo->tc_info.prio_tc[i] = prio_tc[i];
1560                 }
1561         }
1562 }
1563
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

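/* Initialize the whole TM block: scheduler setup first, then pause/PFC.
 * Only TC-based and VNET-based Tx scheduling modes are recognized.
 */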
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

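/* Reset-time TM initialization: flow control defaults to HCLGE_FC_FULL,
 * and VNET-based scheduling is only valid with a single priority group.
 */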
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

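/* Refresh the vport TC info and the queue-to-qset mapping. Back-pressure
 * setup is skipped when a single TC is in use and PFC is disabled, as
 * there is presumably nothing for back pressure to arbitrate then.
 */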
int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
		return 0;

	return hclge_tm_bp_setup(hdev);
}

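/* The query helpers below share one pattern: set up a read descriptor
 * (the last argument of hclge_cmd_setup_basic_desc() is true), fill in
 * the key field, send the command and parse the reply from desc.data.
 * On hardware before V3 the qset count is derived instead of queried;
 * e.g. a hypothetical PF with 4 active VFs would report 8 + 4 = 12 qsets.
 */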
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}

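/* The shaper query helpers below unpack one 32-bit shaping word into the
 * five shaper sub-fields (ir_b, ir_u, ir_s, bs_b, bs_s) with
 * hclge_tm_get_field(), and carry the flag and rate fields of the
 * descriptor alongside.
 */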
int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

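/* @cmd selects which of the two per-priority shaper buckets to read:
 * HCLGE_OPC_TM_PRI_C_SHAPPING or HCLGE_OPC_TM_PRI_P_SHAPPING (presumably
 * the committed and peak shaping parameters, respectively).
 */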
int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}

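/* Illustrative repack of the raw qset id, assuming the field positions in
 * the diagram inside the function below (i.e. HCLGE_TM_QS_ID_H_S == 10):
 * raw 0x0c05 has qs_id_h = 1 (bits 15~11), vld = 1 (bit 10, dropped) and
 * qs_id_l = 0x005 (bits 9~0), giving a returned qset_id of 0x405.
 */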
int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *            | qs_id_h | vld | qs_id_l |
	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
	 *             \         \   /         /
	 *              \         \ /         /
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}

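/* HCLGE_TM_TC_MASK keeps the low three bits of the returned tc_id, so
 * valid TC numbers are 0-7.
 */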
int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}

int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}

int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}

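/* Unlike the other getters, this one addresses the descriptor payload
 * directly: desc.data[0] carries the pg id in, desc.data[1] returns the
 * scheduling mode.
 */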
int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}

int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}

int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}