net/mlx5e: Implement CT entry update
author    Vlad Buslov <vladbu@nvidia.com>        Thu, 1 Dec 2022 15:56:10 +0000 (16:56 +0100)
committer Saeed Mahameed <saeedm@nvidia.com>     Sat, 18 Feb 2023 09:01:33 +0000 (01:01 -0800)
With support for UDP NEW offload, the flow table may now send updates for
existing flows. Properly support replacing existing entries by updating the
flow restore_cookie and replacing the rule with a new one that has the same
match but a new mod_hdr action setting the updated ctinfo.

Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 193562c..a7e0ab6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -871,6 +871,68 @@ err_attr:
        return err;
 }
 
+static int
+mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
+                             struct flow_rule *flow_rule,
+                             struct mlx5_ct_entry *entry,
+                             bool nat, u8 zone_restore_id)
+{
+       struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+       struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
+       struct mlx5e_mod_hdr_handle *mh;
+       struct mlx5_ct_fs_rule *rule;
+       struct mlx5_flow_spec *spec;
+       int err;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+
+       old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+       if (!old_attr) {
+               err = -ENOMEM;
+               goto err_attr;
+       }
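+       /* Snapshot the current attr; it is needed to tear down the old
+        * mod_hdr only after the replacement rule is in place.
+        */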
+       *old_attr = *attr;
+
+       err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
+                                             nat, mlx5_tc_ct_entry_has_nat(entry));
+       if (err) {
+               ct_dbg("Failed to create ct entry mod hdr");
+               goto err_mod_hdr;
+       }
+
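+       /* The replacement rule matches the same tuple and zone as the
+        * original; only the mod_hdr action differs.
+        */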
+       mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
+       mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
+
+       rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+               goto err_rule;
+       }
+
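+       /* Make-before-break: remove the old rule and mod_hdr only now that
+        * the replacement has been installed.
+        */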
+       ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
+       zone_rule->rule = rule;
+       mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
+       zone_rule->mh = mh;
+
+       kfree(old_attr);
+       kvfree(spec);
+       ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+
+       return 0;
+
+err_rule:
+       mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+err_mod_hdr:
+       kfree(old_attr);
+err_attr:
+       kvfree(spec);
+       return err;
+}
+
 static bool
 mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
 {
@@ -1066,6 +1128,52 @@ err_orig:
 }
 
 static int
+mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
+                              struct flow_rule *flow_rule,
+                              struct mlx5_ct_entry *entry,
+                              u8 zone_restore_id)
+{
+       int err;
+
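+       /* Replace the non-NAT rule first, then the NAT one; if the latter
+        * fails, delete the former so the entry is not left half-updated.
+        */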
+       err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+                                           zone_restore_id);
+       if (err)
+               return err;
+
+       err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+                                           zone_restore_id);
+       if (err)
+               mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+       return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+                                     struct mlx5_ct_entry *entry, unsigned long cookie)
+{
+       struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+       int err;
+
+       err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+       if (!err)
+               return 0;
+
+       /* If the update failed, look the entry up again under ht_lock
+        * protection and properly delete it.
+        */
+       spin_lock_bh(&ct_priv->ht_lock);
+       entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+       if (entry) {
+               rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+               spin_unlock_bh(&ct_priv->ht_lock);
+               mlx5_tc_ct_entry_put(entry);
+       } else {
+               spin_unlock_bh(&ct_priv->ht_lock);
+       }
+       return err;
+}
+
+static int
 mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
                                  struct flow_cls_offload *flow)
 {
@@ -1087,9 +1195,17 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
        spin_lock_bh(&ct_priv->ht_lock);
        entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
        if (entry && refcount_inc_not_zero(&entry->refcnt)) {
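+               /* An identical restore cookie means a duplicate add, not an
+                * update.
+                */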
+               if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
+                       spin_unlock_bh(&ct_priv->ht_lock);
+                       mlx5_tc_ct_entry_put(entry);
+                       return -EEXIST;
+               }
+               entry->restore_cookie = meta_action->ct_metadata.cookie;
                spin_unlock_bh(&ct_priv->ht_lock);
+
+               err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
                mlx5_tc_ct_entry_put(entry);
-               return -EEXIST;
+               return err;
        }
        spin_unlock_bh(&ct_priv->ht_lock);