net/mlx5e: Update restore chain id for slow path packets
author Paul Blakey <paulb@nvidia.com>
Wed, 26 Oct 2022 13:51:43 +0000 (14:51 +0100)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 27 Oct 2022 18:06:55 +0000 (11:06 -0700)
Currently, encap slow path rules just forward to software without
setting the chain id miss register, so the driver doesn't restore
the chain, and packets hitting this rule restart from tc chain 0
instead of continuing from the chain the encap rule was on.

Fix this by setting the chain id miss register to the chain id mapping.

Fixes: 8f1e0b97cc70 ("net/mlx5: E-Switch, Mark miss packets with new chain id mapping")
Signed-off-by: Paul Blakey <paulb@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20221026135153.154807-6-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
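
To illustrate the mechanism, here is a standalone toy model of the
chain-id restore scheme (illustration only, with made-up helpers; the
real patch below uses mlx5_chains_get_chain_mapping() and a
CHAIN_TO_REG modify-header action attached via mlx5e_mod_hdr_attach()):

#include <stdio.h>
#include <stdint.h>

#define MAX_MAPPINGS 16

static uint32_t chain_of[MAX_MAPPINGS];	/* mapped id -> tc chain */
static int nmapped;

/* rough stand-in for mlx5_chains_get_chain_mapping(): chain -> compact id */
static int get_chain_mapping(uint32_t chain, uint32_t *id)
{
	if (nmapped == MAX_MAPPINGS)
		return -1;
	chain_of[nmapped] = chain;
	*id = ++nmapped;	/* ids start at 1; 0 means "register unset" */
	return 0;
}

/* what chain restore does with the miss register on the software miss path */
static uint32_t restore_chain(uint32_t reg)
{
	return reg ? chain_of[reg - 1] : 0;	/* unset reg -> tc chain 0 */
}

int main(void)
{
	uint32_t id = 0, miss_reg;

	/* the encap rule lives on tc chain 3; the slow path rule must
	 * carry that context when it punts packets to software */
	if (get_chain_mapping(3, &id))
		return 1;

	miss_reg = id;	/* what the added modify-header action writes */
	printf("with fix, restored chain: %u\n", restore_chain(miss_reg));

	miss_reg = 0;	/* before the fix, the register was never set */
	printf("without fix, chain:       %u\n", restore_chain(miss_reg));
	return 0;
}

The diff applies the same idea in the driver: allocate a mapping for
flow->attr->chain, write it to CHAIN_TO_REG through a mod header
attached to the slow path rule, and release both on unoffload.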
drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

index 10c9a8a79d0051d44a8a82f85d2ccfaf64d98b35..2e42d7c5451e916aa691dfe00d8f953ab04e3787 100644
@@ -96,6 +96,7 @@ struct mlx5e_tc_flow {
        struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5e_tc_flow *peer_flow;
        struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+       struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
        struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
        struct list_head hairpin; /* flows sharing the same hairpin */
        struct list_head peer;    /* flows with peer flow */
@@ -111,6 +112,7 @@ struct mlx5e_tc_flow {
        struct completion del_hw_done;
        struct mlx5_flow_attr *attr;
        struct list_head attrs;
+       u32 chain_mapping;
 };
 
 struct mlx5_flow_handle *
index 70a7a61f97087a22991448978af56164b2353fb5..2cceace36c7763ef43015aefbbefa71650783b27 100644
@@ -1405,8 +1405,13 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
                              struct mlx5e_tc_flow *flow,
                              struct mlx5_flow_spec *spec)
 {
+       struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+       struct mlx5e_mod_hdr_handle *mh = NULL;
        struct mlx5_flow_attr *slow_attr;
        struct mlx5_flow_handle *rule;
+       bool fwd_and_modify_cap;
+       u32 chain_mapping = 0;
+       int err;
 
        slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
        if (!slow_attr)
@@ -1417,13 +1422,56 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
        slow_attr->esw_attr->split_count = 0;
        slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
 
+       fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
+       if (!fwd_and_modify_cap)
+               goto skip_restore;
+
+       err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
+       if (err)
+               goto err_get_chain;
+
+       err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+                                       CHAIN_TO_REG, chain_mapping);
+       if (err)
+               goto err_reg_set;
+
+       mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
+                                 MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
+       if (IS_ERR(mh)) {
+               err = PTR_ERR(mh);
+               goto err_attach;
+       }
+
+       slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+       slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+
+skip_restore:
        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
-       if (!IS_ERR(rule))
-               flow_flag_set(flow, SLOW);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               goto err_offload;
+       }
 
+       flow->slow_mh = mh;
+       flow->chain_mapping = chain_mapping;
+       flow_flag_set(flow, SLOW);
+
+       mlx5e_mod_hdr_dealloc(&mod_acts);
        kfree(slow_attr);
 
        return rule;
+
+err_offload:
+       if (fwd_and_modify_cap)
+               mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
+err_attach:
+err_reg_set:
+       if (fwd_and_modify_cap)
+               mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
+err_get_chain:
+       mlx5e_mod_hdr_dealloc(&mod_acts);
+       kfree(slow_attr);
+       return ERR_PTR(err);
 }
 
 void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
@@ -1441,7 +1489,17 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->esw_attr->split_count = 0;
        slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
+       if (flow->slow_mh) {
+               slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+               slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+       }
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+       if (flow->slow_mh) {
+               mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+               mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
+               flow->chain_mapping = 0;
+               flow->slow_mh = NULL;
+       }
        flow_flag_clear(flow, SLOW);
        kfree(slow_attr);
 }