if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = esw->fdb_table.offloads.slow_fdb;
+ dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}
static int
if (rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
- flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
peer_dev->priv.eswitch,
spec, vport->vport);
- flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
- flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
- mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
+ mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
- if (!esw->fdb_table.offloads.slow_fdb)
+ if (!mlx5_eswitch_get_slow_fdb(esw))
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
esw_chains_destroy(esw, esw_chains(esw));
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
- mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
+ mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
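
For reference, the accessor these hunks switch to is presumably just a thin static inline wrapper around the same field the removed lines dereferenced directly. A minimal sketch, assuming the helper sits alongside the other eswitch helpers (e.g. in eswitch.h) and that slow_fdb remains a struct mlx5_flow_table pointer; the exact location and layout are assumptions, not shown in this patch:

/* Hedged sketch, not copied from this patch: hide the slow-path FDB
 * pointer behind a single accessor so callers no longer reach into
 * esw->fdb_table.offloads directly.
 */
static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

With such a helper in place, every call site above (the rule insertions, the error paths, and esw_destroy_offloads_fdb_tables) needs only the mechanical substitution shown, and any future change to where the slow FDB pointer is stored has to touch just this one function.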