From 9ef9c640f4c5376189d8ca70f51a50a3d0b16914 Mon Sep 17 00:00:00 2001
From: Aviv Heller
Date: Sun, 18 Sep 2016 20:48:01 +0300
Subject: [PATCH] IB/mlx5: Merge vports flow steering during LAG

This is done in two steps:
1) Issue CREATE_VPORT_LAG so that Ethernet traffic from both ports
   arrives at the PF0 root flow table, allowing all raw-eth traffic
   to be caught on PF0.
2) Create a LAG demux flow table to direct all non-raw-eth traffic
   back to its source port, ensuring that normal Ethernet traffic
   "jumps" to the root flow table of its RX port (non-LAG behavior).

Signed-off-by: Aviv Heller
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx5/main.c    | 49 ++++++++++++++++++++++++++++++++++++
 drivers/infiniband/hw/mlx5/mlx5_ib.h |  1 +
 2 files changed, 50 insertions(+)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ed038b7..70e7c8d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2703,6 +2703,47 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
 		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
+static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
+								 MLX5_FLOW_NAMESPACE_LAG);
+	struct mlx5_flow_table *ft;
+	int err;
+
+	if (!ns || !mlx5_lag_is_active(mdev))
+		return 0;
+
+	err = mlx5_cmd_create_vport_lag(mdev);
+	if (err)
+		return err;
+
+	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
+	if (IS_ERR(ft)) {
+		err = PTR_ERR(ft);
+		goto err_destroy_vport_lag;
+	}
+
+	dev->flow_db.lag_demux_ft = ft;
+	return 0;
+
+err_destroy_vport_lag:
+	mlx5_cmd_destroy_vport_lag(mdev);
+	return err;
+}
+
+static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
+{
+	struct mlx5_core_dev *mdev = dev->mdev;
+
+	if (dev->flow_db.lag_demux_ft) {
+		mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
+		dev->flow_db.lag_demux_ft = NULL;
+
+		mlx5_cmd_destroy_vport_lag(mdev);
+	}
+}
+
 static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
 {
 	if (dev->roce.nb.notifier_call) {
@@ -2726,8 +2767,15 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 	if (err)
 		goto err_unregister_netdevice_notifier;
 
+	err = mlx5_roce_lag_init(dev);
+	if (err)
+		goto err_disable_roce;
+
 	return 0;
 
+err_disable_roce:
+	mlx5_nic_vport_disable_roce(dev->mdev);
+
 err_unregister_netdevice_notifier:
 	mlx5_remove_roce_notifier(dev);
 	return err;
@@ -2735,6 +2783,7 @@ err_unregister_netdevice_notifier:
 
 static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
 {
+	mlx5_roce_lag_cleanup(dev);
 	mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f8a62a6..53e1f1d 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -158,6 +158,7 @@ struct mlx5_ib_flow_handler {
 struct mlx5_ib_flow_db {
 	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
 	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
+	struct mlx5_flow_table		*lag_demux_ft;
 	/* Protect flow steering bypass flow tables
 	 * when add/del flow rules.
 	 * only single add/removal of flow steering rule could be done
-- 
2.7.4
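
Note: the ordering the patch relies on can be condensed into the sketch below.
It reuses only the mlx5 core calls visible in the diff; the wrapper names
lag_steering_setup()/lag_steering_teardown() and the static lag_demux_ft
variable are hypothetical, the header list is approximate, and the code is an
illustration of the create/teardown ordering rather than the driver code
itself (in the driver the table pointer lives in dev->flow_db.lag_demux_ft).

/* Illustrative sketch only, not part of the patch.  It condenses the ordering
 * used by mlx5_roce_lag_init()/mlx5_roce_lag_cleanup(): create the vport LAG
 * first, then the LAG demux flow table, and tear down strictly in reverse.
 */
#include <linux/err.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/vport.h>

static struct mlx5_flow_table *lag_demux_ft;	/* hypothetical stand-in for flow_db */

static int lag_steering_setup(struct mlx5_core_dev *mdev)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int err;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_LAG);
	if (!ns || !mlx5_lag_is_active(mdev))
		return 0;				/* nothing to merge */

	/* Step 1: merge both vports' Ethernet traffic onto PF0's root table */
	err = mlx5_cmd_create_vport_lag(mdev);
	if (err)
		return err;

	/* Step 2: demux table sends non-raw-eth traffic back to its RX port */
	ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
	if (IS_ERR(ft)) {
		mlx5_cmd_destroy_vport_lag(mdev);	/* undo step 1 on failure */
		return PTR_ERR(ft);
	}

	lag_demux_ft = ft;
	return 0;
}

static void lag_steering_teardown(struct mlx5_core_dev *mdev)
{
	if (!lag_demux_ft)
		return;

	/* reverse creation order: flow table first, then the vport LAG */
	mlx5_destroy_flow_table(lag_demux_ft);
	lag_demux_ft = NULL;
	mlx5_cmd_destroy_vport_lag(mdev);
}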