net: hns3: refine function hclge_tm_pri_q_qs_cfg()
authorGuangbin Huang <huangguangbin2@huawei.com>
Mon, 29 Nov 2021 14:00:24 +0000 (22:00 +0800)
committerDavid S. Miller <davem@davemloft.net>
Mon, 29 Nov 2021 14:26:17 +0000 (14:26 +0000)
This patch encapsulates the queue-to-qset configuration code for the two
scheduling modes (TC based and VNET based) into two separate functions,
making the code more concise.

Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c

index 1afd305..3edbfc8 100644 (file)
@@ -916,38 +916,63 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
        return 0;
 }
 
-static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
 {
        struct hclge_vport *vport = hdev->vport;
+       u16 i, k;
        int ret;
-       u32 i, k;
 
-       if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
-               /* Cfg qs -> pri mapping, one by one mapping */
-               for (k = 0; k < hdev->num_alloc_vport; k++) {
-                       struct hnae3_knic_private_info *kinfo =
-                               &vport[k].nic.kinfo;
-
-                       for (i = 0; i < kinfo->tc_info.num_tc; i++) {
-                               ret = hclge_tm_qs_to_pri_map_cfg(
-                                       hdev, vport[k].qs_offset + i, i);
-                               if (ret)
-                                       return ret;
-                       }
+       /* Cfg qs -> pri mapping, one by one mapping */
+       for (k = 0; k < hdev->num_alloc_vport; k++) {
+               struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+               for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+                       ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+                                                        vport[k].qs_offset + i,
+                                                        i);
+                       if (ret)
+                               return ret;
                }
-       } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
-               /* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
-               for (k = 0; k < hdev->num_alloc_vport; k++)
-                       for (i = 0; i < HNAE3_MAX_TC; i++) {
-                               ret = hclge_tm_qs_to_pri_map_cfg(
-                                       hdev, vport[k].qs_offset + i, k);
-                               if (ret)
-                                       return ret;
-                       }
-       } else {
-               return -EINVAL;
        }
 
+       return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
+{
+       struct hclge_vport *vport = hdev->vport;
+       u16 i, k;
+       int ret;
+
+       /* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
+       for (k = 0; k < hdev->num_alloc_vport; k++)
+               for (i = 0; i < HNAE3_MAX_TC; i++) {
+                       ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+                                                        vport[k].qs_offset + i,
+                                                        k);
+                       if (ret)
+                               return ret;
+               }
+
+       return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+       struct hclge_vport *vport = hdev->vport;
+       int ret;
+       u32 i;
+
+       if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
+               ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
+       else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+               ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
+       else
+               return -EINVAL;
+
+       if (ret)
+               return ret;
+
        /* Cfg q -> qs mapping */
        for (i = 0; i < hdev->num_alloc_vport; i++) {
                ret = hclge_vport_q_to_qs_map(hdev, vport);