Currently, the per vport table is used only for the port mirroring
action. However, the sample action will also require a per vport
table instance. Generalize the vport table API to work with multiple
namespaces, where each namespace manages its own vport table
instance.
Signed-off-by: Chris Mi <cmi@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
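
To illustrate the generalized API, a hypothetical consumer (the names
sample_ns and esw_sample_tbl_get, and the sizes, are made up for this
sketch and are not part of the patch) would declare its own namespace
and pass it through the table attributes on every get:

/* Sketch only: a private namespace gives this consumer its own table
 * size, auto-group count and creation flags, independent of the
 * mirroring tables.
 */
static const struct esw_vport_tbl_namespace sample_ns = {
	.max_fte = 256,		/* assumed capacity, not from the patch */
	.max_num_groups = 4,	/* assumed group count */
	.flags = 0,
};

static struct mlx5_flow_table *
esw_sample_tbl_get(struct mlx5_eswitch *esw, u16 vport)
{
	struct mlx5_vport_tbl_attr attr = {
		.chain = 0,
		.prio = 1,
		.vport = vport,
		.vport_ns = &sample_ns, /* selects this namespace's instance */
	};

	return mlx5_esw_vporttbl_get(esw, &attr);
}

The matching release must pass the same attributes, including
.vport_ns, to mlx5_esw_vporttbl_put() so the reference drops on the
same table instance.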
#include "eswitch.h"
-#define MLX5_ESW_VPORT_TABLE_SIZE 128
-#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
-
/* This struct is used as a key to the hash table and we need it to be packed
* so hash result is consistent
*/
u16 prio;
u16 vport;
u16 vhca_id;
+ const struct esw_vport_tbl_namespace *vport_ns;
} __packed;
struct mlx5_vport_table {
};
static struct mlx5_flow_table *
-esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
+esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns,
+ const struct esw_vport_tbl_namespace *vport_ns)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *fdb;
- ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
- ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
+ ft_attr.autogroup.max_num_groups = vport_ns->max_num_groups;
+ ft_attr.max_fte = vport_ns->max_fte;
ft_attr.prio = FDB_PER_VPORT;
+ ft_attr.flags = vport_ns->flags;
fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (IS_ERR(fdb)) {
esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
key->chain = attr->chain;
key->prio = attr->prio;
key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ key->vport_ns = attr->vport_ns;
return jhash(key, sizeof(*key), 0);
}
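
Because the namespace pointer is folded into the packed key fed to
jhash(), two requests that agree on chain/prio/vport but carry
different namespaces hash to different entries and so get separate
table instances. A minimal sketch (mirror_ns, sample_ns and the
surrounding variables are placeholders):

/* Same vport/chain/prio, different namespaces: the keys differ only
 * in ->vport_ns, so each get creates or finds its own flow table.
 */
struct mlx5_vport_tbl_attr attr = {
	.chain = 0,
	.prio = 1,
	.vport = vport_num,
	.vport_ns = &mirror_ns,
};
struct mlx5_flow_table *mirror_fdb, *sample_fdb;

mirror_fdb = mlx5_esw_vporttbl_get(esw, &attr);

attr.vport_ns = &sample_ns;
sample_fdb = mlx5_esw_vporttbl_get(esw, &attr); /* distinct table */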
goto err_ns;
}
- fdb = esw_vport_tbl_create(esw, ns);
+ fdb = esw_vport_tbl_create(esw, ns, attr->vport_ns);
if (IS_ERR(fdb))
goto err_ns;
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
+struct esw_vport_tbl_namespace {
+ int max_fte;
+ int max_num_groups;
+ u32 flags;
+};
+
struct mlx5_vport_tbl_attr {
u16 chain;
u16 prio;
u16 vport;
+ const struct esw_vport_tbl_namespace *vport_ns;
};
struct mlx5_flow_table *
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
+#define MLX5_ESW_VPORT_TBL_SIZE 128
+#define MLX5_ESW_VPORT_TBL_NUM_GROUPS 4
+
+static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
+ .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
+ .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
+ .flags = 0,
+};
+
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
} else {
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
fwd_attr.chain = attr->chain;
fwd_attr.prio = attr->prio;
fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
}
if (fwd_rule) {
attr.prio = 1;
mlx5_esw_for_all_vports(esw, i, vport) {
attr.vport = vport->vport;
+ attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
mlx5_esw_vporttbl_put(esw, &attr);
}
}
attr.prio = 1;
mlx5_esw_for_all_vports(esw, i, vport) {
attr.vport = vport->vport;
+ attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
fdb = mlx5_esw_vporttbl_get(esw, &attr);
if (IS_ERR(fdb))
goto out;