net: hns3: add limit ets dwrr bandwidth cannot be 0
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT      100

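/* Convert an IEEE ETS configuration from dcbnl into the driver's TM
 * (traffic management) info: strict-priority TCs get SP scheduling with a
 * zero DWRR weight, ETS TCs get DWRR scheduling with the requested
 * bandwidth percentage.
 */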
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
                                     struct ieee_ets *ets)
{
        u8 i;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        hdev->tm_info.tc_info[i].tc_sch_mode =
                                HCLGE_SCH_MODE_SP;
                        hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        hdev->tm_info.tc_info[i].tc_sch_mode =
                                HCLGE_SCH_MODE_DWRR;
                        hdev->tm_info.pg_info[0].tc_dwrr[i] =
                                ets->tc_tx_bw[i];
                        break;
                default:
                        /* Hardware only supports the SP (strict priority)
                         * and ETS (enhanced transmission selection)
                         * algorithms; if we receive any other value from
                         * dcbnl, return an error.
                         */
                        return -EINVAL;
                }
        }

        hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

        return 0;
}

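/* Fill an ieee_ets structure from the current TM info so the active
 * configuration can be reported back through dcbnl.
 */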
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
                                      struct ieee_ets *ets)
{
        u32 i;

        memset(ets, 0, sizeof(*ets));
        ets->willing = 1;
        ets->ets_cap = hdev->tc_max;

        for (i = 0; i < HNAE3_MAX_TC; i++) {
                ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
                ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

                if (hdev->tm_info.tc_info[i].tc_sch_mode ==
                    HCLGE_SCH_MODE_SP)
                        ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
                else
                        ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
        }
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;

        hclge_tm_info_to_ieee_ets(hdev, ets);

        return 0;
}

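/* Sanity checks shared by the ETS and mqprio paths: the TC count must not
 * exceed the hardware maximum, every user priority must map to a valid TC,
 * and the PF must have enough TQPs allocated to cover the TCs.
 */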
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
                                     u8 *prio_tc)
{
        int i;

        if (num_tc > hdev->tc_max) {
                dev_err(&hdev->pdev->dev,
                        "tc num checking failed, %u > tc_max(%u)\n",
                        num_tc, hdev->tc_max);
                return -EINVAL;
        }

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                if (prio_tc[i] >= num_tc) {
                        dev_err(&hdev->pdev->dev,
                                "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
                                i, prio_tc[i], num_tc);
                        return -EINVAL;
                }
        }

        if (num_tc > hdev->vport[0].alloc_tqps) {
                dev_err(&hdev->pdev->dev,
                        "allocated tqp checking failed, %u > tqp(%u)\n",
                        num_tc, hdev->vport[0].alloc_tqps);
                return -EINVAL;
        }

        return 0;
}

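/* Walk the priority-to-TC map, flag whether it differs from the map
 * currently in use, and return the TC count implied by the largest TC ID.
 */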
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
                               bool *changed)
{
        u8 max_tc_id = 0;
        u8 i;

        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
                if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
                        *changed = true;

                if (ets->prio_tc[i] > max_tc_id)
                        max_tc_id = ets->prio_tc[i];
        }

        /* The number of TCs is the largest TC ID plus 1 */
        return max_tc_id + 1;
}

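/* Validate the per-TC scheduling algorithms: only SP and ETS are
 * supported, every ETS TC must have a nonzero bandwidth (the hardware
 * falls back to SP mode on a zero weight), and if any TC uses ETS the
 * bandwidths must sum to exactly 100 percent.
 */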
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
                                       struct ieee_ets *ets, bool *changed)
{
        bool has_ets_tc = false;
        u32 total_ets_bw = 0;
        u8 i;

        for (i = 0; i < hdev->tc_max; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
                        if (hdev->tm_info.tc_info[i].tc_sch_mode !=
                                HCLGE_SCH_MODE_SP)
                                *changed = true;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
                        /* The hardware switches to SP mode if the bandwidth
                         * is 0, so each ETS bandwidth must be greater
                         * than 0.
                         */
                        if (!ets->tc_tx_bw[i]) {
                                dev_err(&hdev->pdev->dev,
                                        "tc%u ets bw cannot be 0\n", i);
                                return -EINVAL;
                        }

                        if (hdev->tm_info.tc_info[i].tc_sch_mode !=
                                HCLGE_SCH_MODE_DWRR)
                                *changed = true;

                        total_ets_bw += ets->tc_tx_bw[i];
                        has_ets_tc = true;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (has_ets_tc && total_ets_bw != BW_PERCENT)
                return -EINVAL;

        return 0;
}

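/* Run the full set of ETS checks, report the resulting TC count through
 * @tc, and set @changed if the configuration differs from what is
 * currently programmed.
 */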
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
                              u8 *tc, bool *changed)
{
        u8 tc_num;
        int ret;

        tc_num = hclge_ets_tc_changed(hdev, ets, changed);

        ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
        if (ret)
                return ret;

        ret = hclge_ets_sch_mode_validate(hdev, ets, changed);
        if (ret)
                return ret;

        *tc = tc_num;
        if (*tc != hdev->tm_info.num_tc)
                *changed = true;

        return 0;
}

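/* Reprogram the hardware after a TC mapping change: scheduler, pause
 * parameters, packet buffers and the RSS indirection table.
 */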
static int hclge_map_update(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_tm_schd_setup_hw(hdev);
        if (ret)
                return ret;

        ret = hclge_pause_setup_hw(hdev, false);
        if (ret)
                return ret;

        ret = hclge_buffer_alloc(hdev);
        if (ret)
                return ret;

        hclge_rss_indir_init_cfg(hdev);

        return hclge_rss_init_hw(hdev);
}

static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
                return ret;

        return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

static int hclge_notify_init_up(struct hclge_dev *hdev)
{
        int ret;

        ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
        if (ret)
                return ret;

        return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

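/* dcbnl ieee_setets hook: validate the requested ETS configuration and
 * program it, taking the client down and back up when the TC mapping
 * changes.
 */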
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct net_device *netdev = h->kinfo.netdev;
        struct hclge_dev *hdev = vport->back;
        bool map_changed = false;
        u8 num_tc = 0;
        int ret;

        if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
            hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
                return -EINVAL;

        ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
        if (ret)
                return ret;

        if (map_changed) {
                netif_dbg(h, drv, netdev, "set ets\n");

                ret = hclge_notify_down_uinit(hdev);
                if (ret)
                        return ret;
        }

        hclge_tm_schd_info_update(hdev, num_tc);
        if (num_tc > 1)
                hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

        ret = hclge_ieee_ets_to_tm_info(hdev, ets);
        if (ret)
                goto err_out;

        if (map_changed) {
                ret = hclge_map_update(hdev);
                if (ret)
                        goto err_out;

                return hclge_notify_init_up(hdev);
        }

        return hclge_tm_dwrr_cfg(hdev);

err_out:
        if (!map_changed)
                return ret;

        hclge_notify_init_up(hdev);

        return ret;
}

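/* dcbnl ieee_getpfc hook: report the PFC capability, the per-priority
 * enable bitmap and the per-TC pause frame statistics.
 */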
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
        u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;
        int ret;
        u8 i;

        memset(pfc, 0, sizeof(*pfc));
        pfc->pfc_cap = hdev->pfc_max;
        pfc->pfc_en = hdev->tm_info.pfc_en;

        ret = hclge_pfc_tx_stats_get(hdev, requests);
        if (ret)
                return ret;

        ret = hclge_pfc_rx_stats_get(hdev, indications);
        if (ret)
                return ret;

        for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
                pfc->requests[i] = requests[i];
                pfc->indications[i] = indications[i];
        }
        return 0;
}

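/* dcbnl ieee_setpfc hook: translate the per-priority PFC enable bitmap
 * into a per-TC map, then reprogram pause parameters and packet buffers.
 */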
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct net_device *netdev = h->kinfo.netdev;
        struct hclge_dev *hdev = vport->back;
        u8 i, j, pfc_map, *prio_tc;
        int ret;

        if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
                return -EINVAL;

        if (pfc->pfc_en == hdev->tm_info.pfc_en)
                return 0;

        prio_tc = hdev->tm_info.prio_tc;
        pfc_map = 0;

        for (i = 0; i < hdev->tm_info.num_tc; i++) {
                for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
                        if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
                                pfc_map |= BIT(i);
                                break;
                        }
                }
        }

        hdev->tm_info.hw_pfc_map = pfc_map;
        hdev->tm_info.pfc_en = pfc->pfc_en;

        netif_dbg(h, drv, netdev,
                  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
                  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

        hclge_tm_pfc_info_update(hdev);

        ret = hclge_pause_setup_hw(hdev, false);
        if (ret)
                return ret;

        ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
        if (ret)
                return ret;

        ret = hclge_buffer_alloc(hdev);
        if (ret) {
                hclge_notify_client(hdev, HNAE3_UP_CLIENT);
                return ret;
        }

        return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hclge_dev *hdev = vport->back;

        if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
                return 0;

        return hdev->dcbx_cap;
}

static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct net_device *netdev = h->kinfo.netdev;
        struct hclge_dev *hdev = vport->back;

        netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

        /* No support for LLD_MANAGED modes or CEE */
        if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
            (mode & DCB_CAP_DCBX_VER_CEE) ||
            !(mode & DCB_CAP_DCBX_HOST))
                return 1;

        hdev->dcbx_cap = mode;

        return 0;
}

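/* Validate a tc-mqprio offload request: per-TC queue counts must be
 * powers of 2 no larger than the PF RSS size, queue offsets must be
 * contiguous starting from 0, TX rate limiting is not supported, and the
 * total queue count must fit within the allocated TQPs.
 */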
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
        u16 queue_sum = 0;
        int ret;
        int i;

        if (!mqprio_qopt->qopt.num_tc) {
                mqprio_qopt->qopt.num_tc = 1;
                return 0;
        }

        ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
                                        mqprio_qopt->qopt.prio_tc_map);
        if (ret)
                return ret;

        for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
                if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
                        dev_err(&hdev->pdev->dev,
                                "qopt queue count must be power of 2\n");
                        return -EINVAL;
                }

                if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
                        dev_err(&hdev->pdev->dev,
                                "qopt queue count should be no more than %u\n",
                                hdev->pf_rss_size_max);
                        return -EINVAL;
                }

                if (mqprio_qopt->qopt.offset[i] != queue_sum) {
                        dev_err(&hdev->pdev->dev,
                                "qopt queue offsets must start at 0 and be contiguous\n");
                        return -EINVAL;
                }

                if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
                        dev_err(&hdev->pdev->dev,
                                "qopt tx_rate is not supported\n");
                        return -EOPNOTSUPP;
                }

                queue_sum = mqprio_qopt->qopt.offset[i];
                queue_sum += mqprio_qopt->qopt.count[i];
        }
        if (hdev->vport[0].alloc_tqps < queue_sum) {
                dev_err(&hdev->pdev->dev,
                        "qopt queue count sum should be no more than %u\n",
                        hdev->vport[0].alloc_tqps);
                return -EINVAL;
        }

        return 0;
}

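/* Copy the accepted mqprio parameters into the per-netdev TC info */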
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
                                   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
        memset(tc_info, 0, sizeof(*tc_info));
        tc_info->num_tc = mqprio_qopt->qopt.num_tc;
        memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
               sizeof_field(struct hnae3_tc_info, prio_tc));
        memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
               sizeof_field(struct hnae3_tc_info, tqp_count));
        memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
               sizeof_field(struct hnae3_tc_info, tqp_offset));
}

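/* Apply the TC info to the scheduler and reprogram the hardware mappings */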
static int hclge_config_tc(struct hclge_dev *hdev,
                           struct hnae3_tc_info *tc_info)
{
        int i;

        hclge_tm_schd_info_update(hdev, tc_info->num_tc);
        for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
                hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

        return hclge_map_update(hdev);
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
                          struct tc_mqprio_qopt_offload *mqprio_qopt)
{
        struct hclge_vport *vport = hclge_get_vport(h);
        struct hnae3_knic_private_info *kinfo;
        struct hclge_dev *hdev = vport->back;
        struct hnae3_tc_info old_tc_info;
        u8 tc = mqprio_qopt->qopt.num_tc;
        int ret;

        /* If the client is unregistered, changing the mqprio configuration
         * is not allowed, as uninitializing the rings may fail.
         */
        if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
                return -EBUSY;

        if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
                return -EINVAL;

        ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
        if (ret) {
                dev_err(&hdev->pdev->dev,
                        "failed to check mqprio qopt params, ret = %d\n", ret);
                return ret;
        }

        ret = hclge_notify_down_uinit(hdev);
        if (ret)
                return ret;

        kinfo = &vport->nic.kinfo;
        memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
        hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
        kinfo->tc_info.mqprio_active = tc > 0;

        ret = hclge_config_tc(hdev, &kinfo->tc_info);
        if (ret)
                goto err_out;

        hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

        if (tc > 1)
                hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
        else
                hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

        return hclge_notify_init_up(hdev);

err_out:
        if (!tc) {
                dev_warn(&hdev->pdev->dev,
                         "failed to destroy mqprio, will take effect after reset, ret = %d\n",
                         ret);
        } else {
                /* roll back */
                memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
                if (hclge_config_tc(hdev, &kinfo->tc_info))
                        dev_err(&hdev->pdev->dev,
                                "failed to roll back tc configuration\n");
        }
        hclge_notify_init_up(hdev);

        return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
        .ieee_getets    = hclge_ieee_getets,
        .ieee_setets    = hclge_ieee_setets,
        .ieee_getpfc    = hclge_ieee_getpfc,
        .ieee_setpfc    = hclge_ieee_setpfc,
        .getdcbx        = hclge_getdcbx,
        .setdcbx        = hclge_setdcbx,
        .setup_tc       = hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
        struct hclge_vport *vport = hdev->vport;
        struct hnae3_knic_private_info *kinfo;

        /* If hdev does not support DCB or the vport is not a PF,
         * dcb_ops is not set.
         */
        if (!hnae3_dev_dcb_supported(hdev) ||
            vport->vport_id != 0)
                return;

        kinfo = &vport->nic.kinfo;
        kinfo->dcb_ops = &hns3_dcb_ops;
        hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}
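
/* Illustrative only, not part of the driver: the paths above are driven
 * from user space through dcbnl and tc. The device name and TC layout
 * below are assumptions for the example, a minimal sketch using lldptool
 * (from lldpad) and iproute2.
 *
 *   # Two ETS TCs sharing 60/40 of the bandwidth plus one strict TC;
 *   # a zero tcbw entry for an "ets" TC is now rejected by
 *   # hclge_ets_sch_mode_validate() with "tcX ets bw cannot be 0".
 *   lldptool -T -i eth0 -V ETS-CFG \
 *           tsa=0:ets,1:ets,2:strict up2tc=0:0,1:1,2:2 tcbw=60,40,0
 *
 *   # Hardware-offloaded mqprio in channel mode (hclge_setup_tc() path):
 *   # two TCs, four queues each, offsets contiguous from 0.
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *           queues 4@0 4@4 hw 1 mode channel
 */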