net/mlx5e: Distribute RSS table among all RX rings
author	Tariq Toukan <tariqt@mellanox.com>
Wed, 7 Jun 2017 10:55:34 +0000 (13:55 +0300)
committer	Saeed Mahameed <saeedm@mellanox.com>
Sun, 3 Sep 2017 03:34:09 +0000 (06:34 +0300)
By default, distribute the RSS indirection table entries uniformly
across all RX rings, rather than restricting them to the rings on
the local NUMA node: irqbalance dynamically overrides the default
IRQ affinities of the RX rings anyway.
This gives better multi-stream performance and CPU utilization.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
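
For illustration, a minimal user-space sketch of the new fill logic follows
(it mirrors the patched mlx5e_build_default_indir_rqt; the table size of 128
and the channel count of 8 are example values, not taken from any specific
configuration):

	#include <stdio.h>

	#define INDIR_RQT_SIZE 128	/* stands in for MLX5E_INDIR_RQT_SIZE */

	/* Fill the RSS indirection table round-robin over all RX rings,
	 * as the patched driver function does: entry i maps to ring
	 * (i % num_channels), so every open ring is referenced. */
	static void build_default_indir_rqt(unsigned int *indirection_rqt,
					    int len, int num_channels)
	{
		int i;

		for (i = 0; i < len; i++)
			indirection_rqt[i] = i % num_channels;
	}

	int main(void)
	{
		unsigned int rqt[INDIR_RQT_SIZE];
		int i;

		build_default_indir_rqt(rqt, INDIR_RQT_SIZE, 8);

		for (i = 0; i < INDIR_RQT_SIZE; i++)
			printf("%u%c", rqt[i], (i + 1) % 16 ? ' ' : '\n');

		return 0;
	}

With 8 channels the table cycles 0 1 2 3 4 5 6 7 repeatedly, so each ring
owns exactly 16 of the 128 entries.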
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 1388a1e..8b7d83b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -925,8 +925,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
 void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
 
-void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
-                                  u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels);
 int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6127e0d..d12e9fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -663,8 +663,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
        new_channels.params = priv->channels.params;
        new_channels.params.num_channels = count;
        if (!netif_is_rxfh_configured(priv->netdev))
-               mlx5e_build_default_indir_rqt(priv->mdev,
-                                             new_channels.params.indirection_rqt,
+               mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt,
                                              MLX5E_INDIR_RQT_SIZE, count);
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 20f3413..7706860 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3833,22 +3833,11 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
               2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
 }
 
-void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
-                                  u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels)
 {
-       int node = mdev->priv.numa_node;
-       int node_num_of_cores;
        int i;
 
-       if (node == -1)
-               node = first_online_node;
-
-       node_num_of_cores = cpumask_weight(cpumask_of_node(node));
-
-       if (node_num_of_cores)
-               num_channels = min_t(int, num_channels, node_num_of_cores);
-
        for (i = 0; i < len; i++)
                indirection_rqt[i] = i % num_channels;
 }
@@ -3987,7 +3976,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
        /* RSS */
        params->rss_hfunc = ETH_RSS_HASH_XOR;
        netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
-       mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
+       mlx5e_build_default_indir_rqt(params->indirection_rqt,
                                      MLX5E_INDIR_RQT_SIZE, max_channels);
 }
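
To make the behavioral change concrete (example numbers, not from any
specific system): on a machine whose local NUMA node has 4 CPUs but whose
NIC has 8 channels open, the removed code capped num_channels at 4 and
filled the table as 0 1 2 3 0 1 2 3 ..., leaving rings 4-7 out of the RSS
spread entirely; the new code fills it as 0 1 2 3 4 5 6 7 0 1 ...,
distributing the entries across all 8 RX rings.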