}
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u8 rq_type)
+ struct mlx5e_params *params)
{
- params->rq_wq_type = rq_type;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
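The hunk above changes the contract of mlx5e_init_rq_type_params(): instead of receiving the RQ type as a u8 argument and storing it, the function now only reads whatever the caller has already placed in params->rq_wq_type. A minimal userspace sketch of that contract follows; all names and numeric defaults are illustrative placeholders, not mlx5 values.

#include <stdio.h>

/* Hypothetical model (not driver code): "init" no longer takes the RQ
 * type as an argument, it derives sizes from the type the caller chose. */
enum wq_type {
	WQ_TYPE_LINKED_LIST,
	WQ_TYPE_STRIDING_RQ,
};

struct rq_params {
	enum wq_type rq_wq_type;
	unsigned int log_rq_size;
};

static void init_rq_type_params(struct rq_params *params)
{
	/* Derive type-dependent defaults from the already-chosen type. */
	switch (params->rq_wq_type) {
	case WQ_TYPE_STRIDING_RQ:
		params->log_rq_size = 11;	/* placeholder default */
		break;
	default:
		params->log_rq_size = 10;	/* placeholder default */
		break;
	}
}

int main(void)
{
	struct rq_params params;

	/* A caller (e.g. the IPoIB override below) may force a type... */
	params.rq_wq_type = WQ_TYPE_LINKED_LIST;
	/* ...and init then respects it instead of overwriting it. */
	init_rq_type_params(&params);
	printf("log_rq_size = %u\n", params.log_rq_size);
	return 0;
}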
static bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
-static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
+static void mlx5e_set_rq_type(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
{
- u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
+ params->rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
!slow_pci_heuristic(mdev) &&
!params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
MLX5_WQ_TYPE_LINKED_LIST;
- mlx5e_init_rq_type_params(mdev, params, rq_type);
}
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
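The selection logic in mlx5e_set_rq_type() above boils down to a single predicate: striding RQ is chosen only when the device reports the fragmented-striding-RQ capability, the PCI link is not flagged as the bottleneck, no XDP program is attached, and IPsec offload is not active. A condensed, hypothetical restatement with the expected truth table (argument names are illustrative):

#include <assert.h>

/* Hypothetical restatement of the ternary in mlx5e_set_rq_type(). */
static int want_striding_rq(int has_cap, int slow_pci, int has_xdp, int has_ipsec)
{
	return has_cap && !slow_pci && !has_xdp && !has_ipsec;
}

int main(void)
{
	assert(want_striding_rq(1, 0, 0, 0));	/* all preconditions hold */
	assert(!want_striding_rq(0, 0, 0, 0));	/* no HW capability */
	assert(!want_striding_rq(1, 1, 0, 0));	/* slow PCI heuristic trips */
	assert(!want_striding_rq(1, 0, 1, 0));	/* XDP program attached */
	assert(!want_striding_rq(1, 0, 0, 1));	/* IPsec offload active */
	return 0;
}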
bpf_prog_put(old_prog);
if (reset) /* change RQ type according to priv->xdp_prog */
- mlx5e_set_rq_params(priv->mdev, &priv->channels.params);
+ mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
if (was_opened && reset)
mlx5e_open_locked(netdev);
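The XDP path above follows the usual reconfigure pattern: if the swap requires a reset, close the channels, re-derive the RQ type from the new program state, then reopen. The sketch below is a loose, hypothetical model of that flow only; the real driver computes reset from the actual attach/detach transition, and the function names here merely mimic the driver's.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the close / re-derive / reopen sequence. */
struct netdev_state {
	bool opened;
	bool xdp_prog;
	bool striding_rq;
};

static void set_rq_type(struct netdev_state *s)
{
	s->striding_rq = !s->xdp_prog;	/* XDP forces the linked-list RQ */
}

static void xdp_set(struct netdev_state *s, bool prog)
{
	bool reset = s->xdp_prog != prog;	/* simplified trigger */
	bool was_opened = s->opened;

	if (was_opened && reset)
		s->opened = false;		/* close channels */

	s->xdp_prog = prog;			/* swap, put old prog */

	if (reset)
		set_rq_type(s);			/* RQ type follows xdp_prog */

	if (was_opened && reset)
		s->opened = true;		/* reopen channels */
}

int main(void)
{
	struct netdev_state s = { .opened = true };

	set_rq_type(&s);
	xdp_set(&s, true);
	printf("striding_rq with XDP attached: %d\n", s.striding_rq);
	return 0;
}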
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
/* RQ */
- mlx5e_set_rq_params(mdev, params);
+ mlx5e_set_rq_type(mdev, params);
+ mlx5e_init_rq_type_params(mdev, params);
/* HW LRO */
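Note that with the split, call order in this default-params path matters: mlx5e_set_rq_type() must run before mlx5e_init_rq_type_params(), because init now derives its type-dependent sizes from params->rq_wq_type rather than receiving the type as an argument. Callers that want a different type (such as the IPoIB hunk below) simply write params->rq_wq_type themselves before calling init.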
struct mlx5e_params *params)
{
/* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
- mlx5e_init_rq_type_params(mdev, params, MLX5_WQ_TYPE_LINKED_LIST);
+ params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+ mlx5e_init_rq_type_params(mdev, params);
/* RQ size in ipoib by default is 512 */
params->log_rq_size = is_kdump_kernel() ?