return 0;
}
-static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f,
- bool ingress)
+static int
+mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_flow_block *flow_block,
+ struct tc_cls_matchall_offload *f)
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return mlxsw_sp_mall_replace(mlxsw_sp_port, f, ingress);
+ return mlxsw_sp_mall_replace(flow_block, f);
case TC_CLSMATCHALL_DESTROY:
- mlxsw_sp_mall_destroy(mlxsw_sp_port, f);
+ mlxsw_sp_mall_destroy(flow_block, f);
return 0;
default:
return -EOPNOTSUPP;
}
}
-static int mlxsw_sp_setup_tc_block_cb_matchall(enum tc_setup_type type,
- void *type_data,
- void *cb_priv, bool ingress)
+static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
{
- struct mlxsw_sp_port *mlxsw_sp_port = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSMATCHALL:
- if (!tc_cls_can_offload_and_chain0(mlxsw_sp_port->dev,
- type_data))
- return -EOPNOTSUPP;
+ struct mlxsw_sp_flow_block *flow_block = cb_priv;
- return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data,
- ingress);
- case TC_SETUP_CLSFLOWER:
- return 0;
- default:
+ if (mlxsw_sp_flow_block_disabled(flow_block))
return -EOPNOTSUPP;
- }
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_ig(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, true);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_matchall_eg(enum tc_setup_type type,
- void *type_data,
- void *cb_priv)
-{
- return mlxsw_sp_setup_tc_block_cb_matchall(type, type_data,
- cb_priv, false);
-}
-
-static int mlxsw_sp_setup_tc_block_cb_flower(enum tc_setup_type type,
- void *type_data, void *cb_priv)
-{
- struct mlxsw_sp_flow_block *flow_block = cb_priv;
switch (type) {
case TC_SETUP_CLSMATCHALL:
- return 0;
+ return mlxsw_sp_setup_tc_cls_matchall(flow_block, type_data);
case TC_SETUP_CLSFLOWER:
- if (mlxsw_sp_flow_block_disabled(flow_block))
- return -EOPNOTSUPP;
-
return mlxsw_sp_setup_tc_cls_flower(flow_block, type_data);
default:
return -EOPNOTSUPP;
}
}
-static void mlxsw_sp_tc_block_flower_release(void *cb_priv)
+static void mlxsw_sp_tc_block_release(void *cb_priv)
{
struct mlxsw_sp_flow_block *flow_block = cb_priv;
static LIST_HEAD(mlxsw_sp_block_cb_list);
-static int
-mlxsw_sp_setup_tc_block_flower_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f, bool ingress)
+static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_flow_block *flow_block;
bool register_block = false;
int err;
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_setup_tc_block_cb,
mlxsw_sp);
if (!block_cb) {
flow_block = mlxsw_sp_flow_block_create(mlxsw_sp, f->net);
if (!flow_block)
return -ENOMEM;
- block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb_flower,
+ block_cb = flow_block_cb_alloc(mlxsw_sp_setup_tc_block_cb,
mlxsw_sp, flow_block,
- mlxsw_sp_tc_block_flower_release);
+ mlxsw_sp_tc_block_release);
if (IS_ERR(block_cb)) {
mlxsw_sp_flow_block_destroy(flow_block);
err = PTR_ERR(block_cb);
return err;
}
-static void
-mlxsw_sp_setup_tc_block_flower_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- bool ingress)
+static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ bool ingress)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_flow_block *flow_block;
struct flow_block_cb *block_cb;
int err;
- block_cb = flow_block_cb_lookup(f->block,
- mlxsw_sp_setup_tc_block_cb_flower,
+ block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_setup_tc_block_cb,
mlxsw_sp);
if (!block_cb)
return;
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
- struct flow_block_cb *block_cb;
- flow_setup_cb_t *cb;
bool ingress;
- int err;
- if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_ig;
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
ingress = true;
- } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
- cb = mlxsw_sp_setup_tc_block_cb_matchall_eg;
+ else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
ingress = false;
- } else {
+ else
return -EOPNOTSUPP;
- }
f->driver_block_list = &mlxsw_sp_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- if (flow_block_cb_is_busy(cb, mlxsw_sp_port,
- &mlxsw_sp_block_cb_list))
- return -EBUSY;
-
- block_cb = flow_block_cb_alloc(cb, mlxsw_sp_port,
- mlxsw_sp_port, NULL);
- if (IS_ERR(block_cb))
- return PTR_ERR(block_cb);
- err = mlxsw_sp_setup_tc_block_flower_bind(mlxsw_sp_port, f,
- ingress);
- if (err) {
- flow_block_cb_free(block_cb);
- return err;
- }
- flow_block_cb_add(block_cb, f);
- list_add_tail(&block_cb->driver_list, &mlxsw_sp_block_cb_list);
- return 0;
+ return mlxsw_sp_setup_tc_block_bind(mlxsw_sp_port, f, ingress);
case FLOW_BLOCK_UNBIND:
- mlxsw_sp_setup_tc_block_flower_unbind(mlxsw_sp_port,
- f, ingress);
- block_cb = flow_block_cb_lookup(f->block, cb, mlxsw_sp_port);
- if (!block_cb)
- return -ENOENT;
-
- flow_block_cb_remove(block_cb, f);
- list_del(&block_cb->driver_list);
+ mlxsw_sp_setup_tc_block_unbind(mlxsw_sp_port, f, ingress);
return 0;
default:
return -EOPNOTSUPP;
if (!enable) {
if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
- mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block) ||
- !list_empty(&mlxsw_sp_port->mall_list)) {
+ mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
- INIT_LIST_HEAD(&mlxsw_sp_port->mall_list);
mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
* the same localport can have
* different mapping.
*/
- struct list_head mall_list;
struct {
#define MLXSW_HW_STATS_UPDATE_TIME HZ
struct rtnl_link_stats64 stats;
/* spectrum_flow.c */
struct mlxsw_sp_flow_block {
struct list_head binding_list;
+ struct list_head mall_list;
struct mlxsw_sp_acl_ruleset *ruleset_zero;
struct mlxsw_sp *mlxsw_sp;
unsigned int rule_count;
extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
/* spectrum_matchall.c */
-int mlxsw_sp_mall_replace(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f, bool ingress);
-void mlxsw_sp_mall_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f);
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port);
/* spectrum_flower.c */
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
if (!block)
return NULL;
INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->mall_list);
block->mlxsw_sp = mlxsw_sp;
block->net = net;
return block;
return -EOPNOTSUPP;
}
+ err = mlxsw_sp_mall_port_bind(block, mlxsw_sp_port);
+ if (err)
+ return err;
+
binding = kzalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- return -ENOMEM;
+ if (!binding) {
+ err = -ENOMEM;
+ goto err_binding_alloc;
+ }
binding->mlxsw_sp_port = mlxsw_sp_port;
binding->ingress = ingress;
err_ruleset_bind:
kfree(binding);
+err_binding_alloc:
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
return err;
}
mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, block, binding);
kfree(binding);
+
+ mlxsw_sp_mall_port_unbind(block, mlxsw_sp_port);
+
return 0;
}
};
static struct mlxsw_sp_mall_entry *
-mlxsw_sp_mall_entry_find(struct mlxsw_sp_port *port, unsigned long cookie)
+mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
struct mlxsw_sp_mall_entry *mall_entry;
- list_for_each_entry(mall_entry, &port->mall_list, list)
+ list_for_each_entry(mall_entry, &block->mall_list, list)
if (mall_entry->cookie == cookie)
return mall_entry;
}
}
-int mlxsw_sp_mall_replace(struct mlxsw_sp_port *mlxsw_sp_port,
- struct tc_cls_matchall_offload *f, bool ingress)
+int mlxsw_sp_mall_replace(struct mlxsw_sp_flow_block *block,
+ struct tc_cls_matchall_offload *f)
{
+ struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_mall_entry *mall_entry;
__be16 protocol = f->common.protocol;
struct flow_action_entry *act;
int err;
if (!flow_offload_has_one_action(&f->rule->action)) {
- netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
+ NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (f->common.chain_index) {
+ NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
+ NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
return -EOPNOTSUPP;
}
if (!mall_entry)
return -ENOMEM;
mall_entry->cookie = f->cookie;
- mall_entry->ingress = ingress;
+ mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
act = &f->rule->action.entries[0];
} else if (act->id == FLOW_ACTION_SAMPLE &&
protocol == htons(ETH_P_ALL)) {
if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
- netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
+ NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
err = -EOPNOTSUPP;
goto errout;
}
goto errout;
}
- err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
- if (err)
- goto errout;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
+ mall_entry);
+ if (err)
+ goto rollback;
+ }
- list_add_tail(&mall_entry->list, &mlxsw_sp_port->mall_list);
+ block->rule_count++;
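+ /* A rule installed while the block is bound on ingress prevents the
+ * block from later being bound on egress, and vice versa; track that
+ * via the blocker counters.
+ */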
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count++;
+ else
+ block->ingress_blocker_rule_count++;
+ list_add_tail(&mall_entry->list, &block->mall_list);
return 0;
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
kfree(mall_entry);
return err;
}
-void mlxsw_sp_mall_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
struct tc_cls_matchall_offload *f)
{
+ struct mlxsw_sp_flow_block_binding *binding;
struct mlxsw_sp_mall_entry *mall_entry;
- mall_entry = mlxsw_sp_mall_entry_find(mlxsw_sp_port, f->cookie);
+ mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
if (!mall_entry) {
- netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
+ NL_SET_ERR_MSG(f->common.extack, "Entry not found");
return;
}
- mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
-
list_del(&mall_entry->list);
+ if (mall_entry->ingress)
+ block->egress_blocker_rule_count--;
+ else
+ block->ingress_blocker_rule_count--;
+ block->rule_count--;
+ list_for_each_entry(binding, &block->binding_list, list)
+ mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
}
+
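+/* Install the block's existing matchall rules on a port being bound to the
+ * block; on failure, remove the ones already installed.
+ */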
+int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+ int err;
+
+ list_for_each_entry(mall_entry, &block->mall_list, list) {
+ err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
+ if (err)
+ goto rollback;
+ }
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(mall_entry, &block->mall_list,
+ list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+ return err;
+}
+
+void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
+ struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_mall_entry *mall_entry;
+
+ list_for_each_entry(mall_entry, &block->mall_list, list)
+ mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
+}