net/mlx5e: HTB, move stats and max_sqs to priv
author Moshe Tal <moshet@nvidia.com>
Wed, 16 Mar 2022 09:40:09 +0000 (11:40 +0200)
committer Saeed Mahameed <saeedm@nvidia.com>
Tue, 19 Jul 2022 20:32:51 +0000 (13:32 -0700)
Preparation for dynamic allocation of the HTB struct.
The statistics should be preserved even when the struct is de-allocated.

Signed-off-by: Moshe Tal <moshet@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c

index 1222156..d2ed275 100644 (file)
@@ -902,8 +902,6 @@ struct mlx5e_scratchpad {
 struct mlx5e_htb {
        DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
        DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
-       struct mlx5e_sq_stats **qos_sq_stats;
-       u16 max_qos_sqs;
 };
 
 struct mlx5e_trap;
@@ -944,6 +942,8 @@ struct mlx5e_priv {
        struct mlx5e_channel_stats **channel_stats;
        struct mlx5e_channel_stats trap_stats;
        struct mlx5e_ptp_stats     ptp_stats;
+       struct mlx5e_sq_stats      **htb_qos_sq_stats;
+       u16                        htb_max_qos_sqs;
        u16                        stats_nch;
        u16                        max_nch;
        u8                         max_opened_tc;
index 9a61c44..6136cad 100644 (file)
@@ -213,11 +213,11 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
 
        txq_ix = mlx5e_qid_from_qos(chs, node->qid);
 
-       WARN_ON(node->qid > priv->htb.max_qos_sqs);
-       if (node->qid == priv->htb.max_qos_sqs) {
+       WARN_ON(node->qid > priv->htb_max_qos_sqs);
+       if (node->qid == priv->htb_max_qos_sqs) {
                struct mlx5e_sq_stats *stats, **stats_list = NULL;
 
-               if (priv->htb.max_qos_sqs == 0) {
+               if (priv->htb_max_qos_sqs == 0) {
                        stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
                                              sizeof(*stats_list),
                                              GFP_KERNEL);
@@ -230,12 +230,12 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
                        return -ENOMEM;
                }
                if (stats_list)
-                       WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
-               WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
-               /* Order max_qos_sqs increment after writing the array pointer.
+                       WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+               WRITE_ONCE(priv->htb_qos_sq_stats[node->qid], stats);
+               /* Order htb_max_qos_sqs increment after writing the array pointer.
                 * Pairs with smp_load_acquire in en_stats.c.
                 */
-               smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+               smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
        }
 
        ix = node->qid % params->num_channels;
@@ -259,7 +259,7 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
                goto err_free_sq;
        err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
                               &param_sq, sq, 0, node->hw_id,
-                              priv->htb.qos_sq_stats[node->qid]);
+                              priv->htb_qos_sq_stats[node->qid]);
        if (err)
                goto err_close_cq;
 
index 700bca0..fed24b5 100644 (file)
@@ -5372,9 +5372,9 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
        mutex_unlock(&priv->state_lock);
        free_cpumask_var(priv->scratchpad.cpumask);
 
-       for (i = 0; i < priv->htb.max_qos_sqs; i++)
-               kfree(priv->htb.qos_sq_stats[i]);
-       kvfree(priv->htb.qos_sq_stats);
+       for (i = 0; i < priv->htb_max_qos_sqs; i++)
+               kfree(priv->htb_qos_sq_stats[i]);
+       kvfree(priv->htb_qos_sq_stats);
 
        memset(priv, 0, sizeof(*priv));
 }
index 1e87bb2..1a88406 100644 (file)
@@ -474,8 +474,8 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
        int i;
 
        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-       max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-       stats = READ_ONCE(priv->htb.qos_sq_stats);
+       max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+       stats = READ_ONCE(priv->htb_qos_sq_stats);
 
        for (i = 0; i < max_qos_sqs; i++) {
                mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
@@ -2184,13 +2184,13 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
 {
        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-       return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+       return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb_max_qos_sqs);
 }
 
 static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
 {
        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-       u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+       u16 max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
        int i, qid;
 
        for (qid = 0; qid < max_qos_sqs; qid++)
@@ -2208,8 +2208,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
        int i, qid;
 
        /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
-       max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
-       stats = READ_ONCE(priv->htb.qos_sq_stats);
+       max_qos_sqs = smp_load_acquire(&priv->htb_max_qos_sqs);
+       stats = READ_ONCE(priv->htb_qos_sq_stats);
 
        for (qid = 0; qid < max_qos_sqs; qid++) {
                struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);