{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
- u32 action;
int err = 0;
if (!is_valid_attr(flow_attr))
	return ERR_PTR(-EINVAL);
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
- action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+ flow_act.action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
handler->rule = mlx5_add_flow_rules(ft, spec,
- action,
- MLX5_FS_DEFAULT_FLOW_TAG,
- dst, 1);
+ &flow_act,
+ dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5_flow_destination dest;
struct mlx5e_tir *tir = priv->indir_tir;
struct mlx5_flow_spec *spec;
}
arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
+ &flow_act,
&dest, 1);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
- rule = mlx5_add_flow_rules(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
- &dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
enum mlx5e_vlan_rule_type rule_type,
u16 vid, struct mlx5_flow_spec *spec)
{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_handle **rule_p;
break;
}
- *rule_p = mlx5_add_flow_rules(ft, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
- &dest, 1);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
u16 etype,
u8 proto)
{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
int err = 0;
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
}
- rule = mlx5_add_flow_rules(ft, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG,
- dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .encap_id = 0,
+ };
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
break;
}
- ai->rule = mlx5_add_flow_rules(ft, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
+ ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
__func__, mv_dmac);
struct ethtool_rx_flow_spec *fs)
{
struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
struct mlx5_flow_handle *rule;
int err = 0;
- u32 action;
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec)
goto free;
if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
- action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
} else {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
if (!dst) {
dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
- action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
- rule = mlx5_add_flow_rules(ft, spec, action,
- MLX5_FS_DEFAULT_FLOW_TAG, dst, 1);
+ flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
{
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_flow_act flow_act = {
+ .action = action,
+ .flow_tag = flow_tag,
+ .encap_id = 0,
+ };
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
bool table_created = false;
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- rule = mlx5_add_flow_rules(priv->fs.tc.t, spec,
- action, flow_tag,
- &dest, 1);
+ rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
if (IS_ERR(rule))
goto err_add_rule;
int match_header = (is_zero_ether_addr(mac_c) ? 0 :
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_handle *flow_rule = NULL;
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
spec->match_criteria_enable = match_header;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule =
mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest, 1);
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
u8 *smac_v;
}
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->ingress.allow_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW,
- 0, NULL, 0);
+ &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule);
esw_warn(esw->dev,
}
memset(spec, 0, sizeof(*spec));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
vport->ingress.drop_rule =
mlx5_add_flow_rules(vport->ingress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_DROP,
- 0, NULL, 0);
+ &flow_act, NULL, 0);
if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev,
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_spec *spec;
int err = 0;
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->egress.allowed_vlan =
mlx5_add_flow_rules(vport->egress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_ALLOW,
- 0, NULL, 0);
+ &flow_act, NULL, 0);
if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
esw_warn(esw->dev,
/* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
vport->egress.drop_rule =
mlx5_add_flow_rules(vport->egress.acl, spec,
- MLX5_FLOW_CONTEXT_ACTION_DROP,
- 0, NULL, 0);
+ &flow_act, NULL, 0);
if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev,
struct mlx5_esw_flow_attr *attr)
{
struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_fc *counter = NULL;
struct mlx5_flow_handle *rule;
void *misc;
- int action;
int i = 0;
if (esw->mode != SRIOV_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
- action = attr->action;
+ flow_act.action = attr->action;
- if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest[i].vport_num = attr->out_rep->vport;
i++;
}
- if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw->dev, true);
if (IS_ERR(counter))
return ERR_CAST(counter);
MLX5_MATCH_MISC_PARAMETERS;
rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
- spec, action, 0, dest, i);
+ spec, &flow_act, dest, i);
if (IS_ERR(rule))
mlx5_fc_destroy(esw->dev, counter);
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest, 1);
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = 0;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest, 1);
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
+ struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- 0, &dest, 1);
+ &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
+ MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
fg->id, ft->id);
}
-static struct fs_fte *alloc_fte(u8 action,
- u32 flow_tag,
+static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
u32 *match_value,
unsigned int index)
{
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
- fte->flow_tag = flow_tag;
+ fte->flow_tag = flow_act->flow_tag;
fte->index = index;
- fte->action = action;
+ fte->action = flow_act->action;
+ fte->encap_id = flow_act->encap_id;
return fte;
}
/* prev is output, prev->next = new_fte */
static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
u32 *match_value,
- u8 action,
- u32 flow_tag,
+ struct mlx5_flow_act *flow_act,
struct list_head **prev)
{
struct fs_fte *fte;
int index;
index = get_free_fte_index(fg, prev);
- fte = alloc_fte(action, flow_tag, match_value, index);
+ fte = alloc_fte(flow_act, match_value, index);
if (IS_ERR(fte))
return fte;
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
u32 *match_value,
- u8 action,
- u32 flow_tag,
+ struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
{
fs_for_each_fte(fte, fg) {
nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
if (compare_match_value(&fg->mask, match_value, &fte->val) &&
- (action & fte->action) && flow_tag == fte->flow_tag) {
+ (flow_act->action & fte->action) &&
+ flow_act->flow_tag == fte->flow_tag) {
int old_action = fte->action;
- fte->action |= action;
+ fte->action |= flow_act->action;
handle = add_rule_fte(fte, fg, dest, dest_num,
- old_action != action);
+ old_action != flow_act->action);
if (IS_ERR(handle)) {
fte->action = old_action;
goto unlock_fte;
goto unlock_fg;
}
- fte = create_fte(fg, match_value, action, flow_tag, &prev);
+ fte = create_fte(fg, match_value, flow_act, &prev);
if (IS_ERR(fte)) {
handle = (void *)fte;
goto unlock_fg;
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
- u32 action,
- u32 flow_tag,
+ struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
{
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
int i;
for (i = 0; i < dest_num; i++) {
- if (!dest_is_valid(&dest[i], action, ft))
+ if (!dest_is_valid(&dest[i], flow_act->action, ft))
return ERR_PTR(-EINVAL);
}
g->mask.match_criteria,
spec->match_criteria)) {
rule = add_rule_fg(g, spec->match_value,
- action, flow_tag, dest, dest_num);
+ flow_act, dest, dest_num);
if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
goto unlock;
}
goto unlock;
}
- rule = add_rule_fg(g, spec->match_value,
- action, flow_tag, dest, dest_num);
+ rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
if (IS_ERR(rule)) {
/* Remove assumes refcount > 0 and autogroup creates a group
* with a refcount = 0.
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
- u32 action,
- u32 flow_tag,
+ struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num)
{
struct mlx5_flow_destination gen_dest;
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
- u32 sw_action = action;
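+	/* Cache the caller's requested action: the FWD_NEXT_PRIO
+	 * emulation below rewrites flow_act->action to FWD_DEST
+	 * before the internal call.
+	 */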
+ u32 sw_action = flow_act->action;
struct fs_prio *prio;
fs_get_obj(prio, ft->node.parent);
- if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+ if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
if (dest)
gen_dest.ft = next_ft;
dest = &gen_dest;
dest_num = 1;
- action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else {
mutex_unlock(&root->chain_lock);
return ERR_PTR(-EOPNOTSUPP);
}
}
- handle = _mlx5_add_flow_rules(ft, spec, action, flow_tag, dest,
- dest_num);
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(handle) &&
u32 flow_tag;
u32 index;
u32 action;
+ u32 encap_id;
enum fs_fte_status status;
struct mlx5_fc *counter;
};
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
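+/* Per-rule action description for mlx5_add_flow_rules(): an
+ * MLX5_FLOW_CONTEXT_ACTION_* bitmask plus the flow tag and encap id
+ * that are programmed into the flow table entry.
+ */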
+struct mlx5_flow_act {
+ u32 action;
+ u32 flow_tag;
+ u32 encap_id;
+};
+
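+/* Typical caller-side conversion (a sketch mirroring the arfs and
+ * vlan hunks above; ft/spec/dest setup elided):
+ *
+ *	struct mlx5_flow_act flow_act = {
+ *		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ *		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ *		.encap_id = 0,
+ *	};
+ *
+ *	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ */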
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
- u32 action,
- u32 flow_tag,
+ struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
int dest_num);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);