From 3b5ff59fd851d8e8c7c3ba08b01011baffa60cb6 Mon Sep 17 00:00:00 2001
From: Rabie Loulou
Date: Thu, 26 Apr 2018 16:45:41 +0300
Subject: [PATCH] net/mlx5: Adjustments for the activate LAG logic to run under sriov

When HW lag is set/unset, roce must not be enabled on the port, so we
wrap these changes with roce enable/disable, either directly or through
re-creation of the IB device.

Currently, lag and sriov are mutually exclusive, so by definition this
code doesn't run under sriov. Towards removing this exclusion, we need
to make sure that roce will not be enabled on the eswitch manager port
under sriov, since this is a requirement of switchdev mode. We go
strict here and avoid this altogether under sriov.

Signed-off-by: Rabie Loulou
Reviewed-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/lag.c | 33 +++++++++++++++++----------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 8c5c5e4..8127d90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -34,6 +34,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/vport.h>
 #include "mlx5_core.h"
+#include "eswitch.h"
 
 enum {
 	MLX5_LAG_FLAG_BONDED = 1 << 0,
@@ -257,13 +258,15 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 {
 	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
 	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+	bool do_bond, sriov_enabled;
 	struct lag_tracker tracker;
 	int i;
-	bool do_bond;
 
 	if (!dev0 || !dev1)
 		return;
 
+	sriov_enabled = mlx5_sriov_is_enabled(dev0) || mlx5_sriov_is_enabled(dev1);
+
 	mutex_lock(&lag_mutex);
 	tracker = ldev->tracker;
 	mutex_unlock(&lag_mutex);
@@ -271,26 +274,32 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	do_bond = tracker.is_bonded && ldev->allowed;
 
 	if (do_bond && !mlx5_lag_is_bonded(ldev)) {
-		for (i = 0; i < MLX5_MAX_PORTS; i++)
-			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
-						    MLX5_INTERFACE_PROTOCOL_IB);
+		if (!sriov_enabled)
+			for (i = 0; i < MLX5_MAX_PORTS; i++)
+				mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+							    MLX5_INTERFACE_PROTOCOL_IB);
 
 		mlx5_activate_lag(ldev, &tracker);
 
-		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-		mlx5_nic_vport_enable_roce(dev1);
+		if (!sriov_enabled) {
+			mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+			mlx5_nic_vport_enable_roce(dev1);
+		}
 	} else if (do_bond && mlx5_lag_is_bonded(ldev)) {
 		mlx5_modify_lag(ldev, &tracker);
 	} else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
-		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-		mlx5_nic_vport_disable_roce(dev1);
+		if (!sriov_enabled) {
+			mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+			mlx5_nic_vport_disable_roce(dev1);
+		}
 
 		mlx5_deactivate_lag(ldev);
 
-		for (i = 0; i < MLX5_MAX_PORTS; i++)
-			if (ldev->pf[i].dev)
-				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
-							 MLX5_INTERFACE_PROTOCOL_IB);
+		if (!sriov_enabled)
+			for (i = 0; i < MLX5_MAX_PORTS; i++)
+				if (ldev->pf[i].dev)
+					mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+								 MLX5_INTERFACE_PROTOCOL_IB);
 	}
 }
-- 
2.7.4
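
For readers who want to trace the new control flow without building the
kernel, below is a minimal standalone C sketch of mlx5_do_bond() after this
patch. All mlx5_* driver calls are replaced with printf stubs; the stub names
(remove_ib_devs(), activate_lag(), and so on) are illustration-only and are
not driver API. Only the gating on sriov_enabled mirrors the diff above.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for driver state; in the driver these come from
 * mlx5_sriov_is_enabled() and mlx5_lag_is_bonded(). */
static bool sriov_enabled;
static bool bonded;

/* printf stubs for the real mlx5 calls (names are illustrative) */
static void remove_ib_devs(void)    { printf("remove IB dev on each port\n"); }
static void add_ib_devs(void)       { printf("re-add IB dev on each port\n"); }
static void add_ib_dev0(void)       { printf("add IB dev on port 0\n"); }
static void remove_ib_dev0(void)    { printf("remove IB dev on port 0\n"); }
static void enable_roce_dev1(void)  { printf("enable roce on port 1\n"); }
static void disable_roce_dev1(void) { printf("disable roce on port 1\n"); }
static void activate_lag(void)      { printf("activate HW lag\n");   bonded = true;  }
static void modify_lag(void)        { printf("modify HW lag\n"); }
static void deactivate_lag(void)    { printf("deactivate HW lag\n"); bonded = false; }

static void do_bond(bool want_bond)
{
	if (want_bond && !bonded) {
		/* Under sriov, all IB/roce manipulation is skipped, so roce
		 * stays disabled on the eswitch manager port (switchdev
		 * requirement); only the HW lag itself is activated. */
		if (!sriov_enabled)
			remove_ib_devs();
		activate_lag();
		if (!sriov_enabled) {
			add_ib_dev0();
			enable_roce_dev1();
		}
	} else if (want_bond && bonded) {
		modify_lag();
	} else if (!want_bond && bonded) {
		if (!sriov_enabled) {
			remove_ib_dev0();
			disable_roce_dev1();
		}
		deactivate_lag();
		if (!sriov_enabled)
			add_ib_devs();
	}
}

int main(void)
{
	sriov_enabled = true;  /* pretend VFs are active on one of the PFs */
	do_bond(true);         /* lag comes up, IB/roce left untouched */
	do_bond(false);        /* lag goes down, IB/roce left untouched */
	return 0;
}

Running the same sequence with sriov_enabled = false prints the full IB
device remove/re-add and roce enable/disable steps around lag activation
and teardown, matching the pre-patch behavior.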