net/mlx5e: add mlx5e_rep_indr_setup_ft_cb support
authorwenxu <wenxu@ucloud.cn>
Wed, 25 Mar 2020 12:18:59 +0000 (20:18 +0800)
committerSaeed Mahameed <saeedm@mellanox.com>
Mon, 30 Mar 2020 06:42:27 +0000 (23:42 -0700)
Add mlx5e_rep_indr_setup_ft_cb to support indr block setup
in FT mode.
Both tc rules and flow table rules have the same format, so tc
parsing can be re-used for them, and the flow table rules are moved
to their steering domain (the specific chain_index); the indr
block offload in FT mode also follows this scenario.

Signed-off-by: wenxu <wenxu@ucloud.cn>
Reviewed-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

index 4c947b1..2a0243e 100644 (file)
@@ -732,6 +732,52 @@ static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
        }
 }
 
+/* Indirect block callback for FT (flow table) offload on a tunnel/indirect
+ * device: handles flower classifier commands by funnelling them through the
+ * existing tc offload path with FT-specific flags.
+ */
+static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
+                                     void *type_data, void *indr_priv)
+{
+       struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+       struct flow_cls_offload *f = type_data;
+       struct flow_cls_offload tmp;
+       struct mlx5e_priv *mpriv;
+       struct mlx5_eswitch *esw;
+       unsigned long flags;
+       int err;
+
+       mpriv = netdev_priv(priv->rpriv->netdev);
+       esw = mpriv->mdev->priv.eswitch;
+
+       /* EGRESS/ESW_OFFLOAD as for regular indirect tc offload, plus
+        * FT_OFFLOAD to route the rule into the FT steering domain.
+        */
+       flags = MLX5_TC_FLAG(EGRESS) |
+               MLX5_TC_FLAG(ESW_OFFLOAD) |
+               MLX5_TC_FLAG(FT_OFFLOAD);
+
+       switch (type) {
+       case TC_SETUP_CLSFLOWER:
+               /* Work on a local copy so the caller's chain/prio fields
+                * are not clobbered by the normalization below.
+                */
+               memcpy(&tmp, f, sizeof(*f));
+
+               /* Re-use tc offload path by moving the ft flow to the
+                * reserved ft chain.
+                *
+                * FT offload can use prio range [0, INT_MAX], so we normalize
+                * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
+                * as with tc, where prio 0 isn't supported.
+                *
+                * We only support chain 0 of FT offload.
+                */
+               if (!mlx5_esw_chains_prios_supported(esw) ||
+                   tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+                   tmp.common.chain_index)
+                       return -EOPNOTSUPP;
+
+               tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+               tmp.common.prio++;
+               err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
+               /* Copy back whatever stats the offload path filled in on the
+                * local copy, so the caller's request reflects them.
+                */
+               memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
+               return err;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static void mlx5e_rep_indr_block_unbind(void *cb_priv)
 {
        struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
@@ -809,6 +855,9 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
        case TC_SETUP_BLOCK:
                return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
                                                  mlx5e_rep_indr_setup_tc_cb);
+       case TC_SETUP_FT:
+               return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
+                                                 mlx5e_rep_indr_setup_ft_cb);
        default:
                return -EOPNOTSUPP;
        }