1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include "hclge_main.h"
/* hclge_ieee_ets_to_tm_info - translate an IEEE 802.1Qaz ETS request from
 * dcbnl into the driver's internal TM (traffic-management) bookkeeping.
 *
 * For every possible TC: a strict-priority TC gets a zero DWRR weight,
 * an ETS TC keeps its requested bandwidth share; any other TSA value is
 * rejected (see the comment in the default arm). Finally the prio->TC
 * map is refreshed.
 *
 * NOTE(review): this listing is elided — the two assignments below
 * visibly continue on lines not shown (sched-mode constant, tc_tx_bw
 * source) and break/return statements are missing from view.
 */
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			/* SP TC: no weighted round-robin share */
			hdev->tm_info.tc_info[i].tc_sch_mode =
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
		case IEEE_8021QAZ_TSA_ETS:
			/* ETS TC: DWRR weight taken from the request */
			hdev->tm_info.tc_info[i].tc_sch_mode =
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
			/* Hardware only supports SP (strict priority)
			 * or ETS (enhanced transmission selection)
			 * algorithms, if we receive some other value
			 * from dcbnl, then throw an error.
			 */
	/* push the (possibly updated) prio->TC map into tm_info */
	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
/* hclge_tm_info_to_ieee_ets - export the driver's current TM state as an
 * IEEE 802.1Qaz ETS structure for dcbnl: capability, prio->TC map,
 * per-TC bandwidth (taken from the PG0 DWRR weights) and the TSA
 * (strict vs ETS) derived from each TC's scheduling mode.
 */
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
	memset(ets, 0, sizeof(*ets));

	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];

		/* SP scheduling reports as strict, otherwise as ETS
		 * (comparison RHS elided in this listing)
		 */
		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
65 static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
67 struct hclge_vport *vport = hclge_get_vport(h);
68 struct hclge_dev *hdev = vport->back;
70 hclge_tm_info_to_ieee_ets(hdev, ets);
/* hclge_dcb_common_validate - validate a requested TC count and
 * prio->TC map against device limits: num_tc must not exceed tc_max,
 * every user priority must map to a TC below num_tc, and each vport
 * must have at least num_tc allocated tqps (one queue pair per TC).
 *
 * NOTE(review): the error-return statements after each dev_err() are
 * elided in this listing.
 */
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);

	/* every user priority must reference a valid TC */
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);

	/* each vport needs one tqp per TC */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		if (num_tc > hdev->vport[i].alloc_tqps) {
			dev_err(&hdev->pdev->dev,
				"allocated tqp(%u) checking failed, %u > tqp(%u)\n",
				i, num_tc, hdev->vport[i].alloc_tqps);
/* hclge_ets_validate - sanity-check an ETS request before applying it.
 *
 * Derives the highest TC referenced by the prio->TC map, runs it
 * through hclge_dcb_common_validate(), compares each TC's requested TSA
 * against the currently programmed scheduling mode, and accumulates the
 * ETS bandwidth, which must total BW_PERCENT when any ETS TC exists.
 * *changed is presumably set on the elided lines whenever the map or a
 * scheduling mode differs from the current state — confirm against the
 * full source.
 */
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		/* a differing prio->TC entry means the map must be re-applied */
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])

		/* track the highest TC any priority maps to */
		if (ets->prio_tc[i] > max_tc)
			max_tc = ets->prio_tc[i];

	ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			/* TSA change vs the programmed mode (RHS elided) */
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
		case IEEE_8021QAZ_TSA_ETS:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
			total_ets_bw += ets->tc_tx_bw[i];

	/* all ETS TCs together must claim exactly 100% bandwidth */
	if (has_ets_tc && total_ets_bw != BW_PERCENT)

	/* a different TC count also counts as a map change */
	if (*tc != hdev->tm_info.num_tc)
159 static int hclge_map_update(struct hnae3_handle *h)
161 struct hclge_vport *vport = hclge_get_vport(h);
162 struct hclge_dev *hdev = vport->back;
165 ret = hclge_tm_schd_setup_hw(hdev);
169 ret = hclge_pause_setup_hw(hdev, false);
173 ret = hclge_buffer_alloc(hdev);
177 hclge_rss_indir_init_cfg(hdev);
179 return hclge_rss_init_hw(hdev);
182 static int hclge_client_setup_tc(struct hclge_dev *hdev)
184 struct hclge_vport *vport = hdev->vport;
185 struct hnae3_client *client;
186 struct hnae3_handle *handle;
190 for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
191 handle = &vport[i].nic;
192 client = handle->client;
194 if (!client || !client->ops || !client->ops->setup_tc)
197 ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
/* hclge_ieee_setets - apply an ETS configuration requested via dcbnl.
 *
 * Refused unless DCBX is host/IEEE managed and mqprio does not own the
 * TC setup. After validation the clients are quiesced (DOWN/UNINIT)
 * when the TC map changed, tm_info is rebuilt from the request, clients
 * are brought back (INIT/UP), and the DWRR weights are programmed.
 *
 * NOTE(review): the error checks between steps and the if(map_changed)
 * braces around the notify calls are elided in this listing.
 */
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;

	/* dcbnl may only mutate ETS in host-managed IEEE mode */
	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);

		/* quiesce the stack before re-mapping TCs */
		ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);

		ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);

	hclge_tm_schd_info_update(hdev, num_tc);

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);

		ret = hclge_client_setup_tc(hdev);

		ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);

		ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);

	return hclge_tm_dwrr_cfg(hdev);
/* hclge_ieee_getpfc - report PFC state and per-TC pause statistics.
 *
 * pfc_en is rebuilt from the TC-based hw_pfc_map: a user priority is
 * marked PFC-enabled when the TC it maps to has its bit set in the map.
 * The per-TC pause counters are then read from hardware (tx stats into
 * requests[], rx stats into indications[]) and copied to the caller.
 */
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
	u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = hdev->tm_info.hw_pfc_map;

	/* Pfc setting is based on TC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			/* priority j belongs to TC i and TC i has PFC on */
			if ((prio_tc[j] == i) && (pfc_map & BIT(i)))
				pfc->pfc_en |= BIT(j);

	ret = hclge_pfc_tx_stats_get(hdev, requests);

	ret = hclge_pfc_rx_stats_get(hdev, indications);

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		pfc->requests[i] = requests[i];
		pfc->indications[i] = indications[i];
/* hclge_ieee_setpfc - apply a per-priority PFC enable mask from dcbnl.
 *
 * Converts the priority-based pfc_en bitmap into the TC-based map the
 * hardware uses and reprograms pause settings only when that map
 * actually changed. NOTE(review): the pfc_map zero-initialisation and
 * the bit-set/break inside the inner loop are elided in this listing.
 */
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;

	/* only valid in host-managed IEEE DCBX mode, not under mqprio */
	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)

	prio_tc = hdev->tm_info.prio_tc;

	/* TC i gets PFC if any priority mapped to it requested PFC */
	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {

	/* nothing changed: skip the hardware update */
	if (pfc_map == hdev->tm_info.hw_pfc_map)

	hdev->tm_info.hw_pfc_map = pfc_map;

	return hclge_pause_setup_hw(hdev, false);
319 /* DCBX configuration */
320 static u8 hclge_getdcbx(struct hnae3_handle *h)
322 struct hclge_vport *vport = hclge_get_vport(h);
323 struct hclge_dev *hdev = vport->back;
325 if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
328 return hdev->dcbx_cap;
331 static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
333 struct hclge_vport *vport = hclge_get_vport(h);
334 struct hclge_dev *hdev = vport->back;
336 /* No support for LLD_MANAGED modes or CEE */
337 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
338 (mode & DCB_CAP_DCBX_VER_CEE) ||
339 !(mode & DCB_CAP_DCBX_HOST))
342 hdev->dcbx_cap = mode;
/* Set up TC for hardware offloaded mqprio in channel mode.
 *
 * Mutually exclusive with DCB: rejected while the DCB flag is set.
 * After validating tc/prio_tc, the scheduler info and prio->TC map are
 * refreshed, TM hardware is re-initialised, and the driver flags are
 * flipped so mqprio (not DCB) owns the TC configuration.
 *
 * NOTE(review): the early-return/error checks and the conditional
 * guarding the MQPRIO flag set vs clear (presumably on the tc count —
 * confirm against the full source) are elided in this listing.
 */
static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)

	ret = hclge_dcb_common_validate(hdev, tc, prio_tc);

	hclge_tm_schd_info_update(hdev, tc);
	hclge_tm_prio_tc_info_update(hdev, prio_tc);

	ret = hclge_tm_init_hw(hdev, false);

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;

	hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
378 static const struct hnae3_dcb_ops hns3_dcb_ops = {
379 .ieee_getets = hclge_ieee_getets,
380 .ieee_setets = hclge_ieee_setets,
381 .ieee_getpfc = hclge_ieee_getpfc,
382 .ieee_setpfc = hclge_ieee_setpfc,
383 .getdcbx = hclge_getdcbx,
384 .setdcbx = hclge_setdcbx,
385 .map_update = hclge_map_update,
386 .setup_tc = hclge_setup_tc,
/* hclge_dcb_ops_set - attach the dcbnl callback table to the PF's NIC
 * handle and seed the default DCBX capability (host-managed IEEE).
 * Done only on DCB-capable devices and only for vport 0 (the PF).
 */
void hclge_dcb_ops_set(struct hclge_dev *hdev)
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* Hdev does not support DCB or vport is
	 * not a pf, then dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;