net/mlx5: E-Switch, properly handle ingress tagged packets on VST
author Moshe Shemesh <moshe@nvidia.com>
Mon, 12 Dec 2022 08:42:15 +0000 (10:42 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 12 Jan 2023 10:59:11 +0000 (11:59 +0100)
[ Upstream commit 1f0ae22ab470946143485a02cc1cd7e05c0f9120 ]

Fix SRIOV VST mode behavior to insert cvlan when a guest tag is already
present in the frame. The previous VST mode behavior was to drop the packet
or overwrite the existing tag, depending on the device version.

This patch fixes the behavior by correctly building the HW steering rule
with a push vlan action or, for older devices, by asking the FW to stack
the vlan when a vlan is already present.
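
A condensed sketch of the resulting decision, written as a hypothetical
helper that is not part of this patch: it folds the ingress-ACL and
vport-context hunks below into one place and uses only the helpers and
enum values the patch introduces.

    /* Illustration only; this helper does not exist in the driver. */
    static void vst_insert_cvlan_sketch(struct mlx5_eswitch *esw,
                                        struct mlx5_vport *vport,
                                        struct mlx5_flow_act *flow_act,
                                        void *in)
    {
            if (esw_vst_mode_is_steering(esw)) {
                    /* steering-capable devices: add a push-vlan action to
                     * the ingress ACL rule, so the cvlan is inserted even
                     * when the guest already tagged the frame
                     */
                    flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                    flow_act->vlan[0].ethtype = ETH_P_8021Q;
                    flow_act->vlan[0].vid = vport->info.vlan;
                    flow_act->vlan[0].prio = vport->info.qos;
            } else if (MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always)) {
                    /* older devices exposing the new FW capability: ask
                     * the FW to stack the cvlan on top of an existing tag
                     */
                    MLX5_SET(modify_esw_vport_context_in, in,
                             esw_vport_context.vport_cvlan_insert,
                             MLX5_VPORT_CVLAN_INSERT_ALWAYS);
            } else {
                    /* fallback: previous behavior, insert only when the
                     * packet carries no cvlan
                     */
                    MLX5_SET(modify_esw_vport_context_in, in,
                             esw_vport_context.vport_cvlan_insert,
                             MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
            }
    }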

Fixes: 07bab9502641 ("net/mlx5: E-Switch, Refactor eswitch ingress acl codes")
Fixes: dfcb1ed3c331 ("net/mlx5: E-Switch, Vport ingress/egress ACLs rules for VST mode")
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
include/linux/mlx5/device.h
include/linux/mlx5/mlx5_ifc.h

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 60a7399..6b4c9ff 100644
@@ -67,6 +67,7 @@ static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport)
 int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
                              struct mlx5_vport *vport)
 {
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        struct mlx5_flow_destination drop_ctr_dst = {};
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_fc *drop_counter = NULL;
@@ -77,6 +78,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
         */
        int table_size = 2;
        int dest_num = 0;
+       int actions_flag;
        int err = 0;
 
        if (vport->egress.legacy.drop_counter) {
@@ -119,8 +121,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
                  vport->vport, vport->info.vlan, vport->info.qos);
 
        /* Allowed vlan rule */
+       actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+       if (vst_mode_steering)
+               actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
        err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan,
-                                        MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+                                        actions_flag);
        if (err)
                goto out;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index b1a5199..093ed86 100644
@@ -139,11 +139,14 @@ static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
 int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                               struct mlx5_vport *vport)
 {
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        struct mlx5_flow_destination drop_ctr_dst = {};
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_spec *spec = NULL;
        struct mlx5_fc *counter = NULL;
+       bool vst_check_cvlan = false;
+       bool vst_push_cvlan = false;
        /* The ingress acl table contains 4 groups
         * (2 active rules at the same time -
         *      1 allow rule from one of the first 3 groups.
@@ -203,7 +206,26 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                goto out;
        }
 
-       if (vport->info.vlan || vport->info.qos)
+       if (vport->info.vlan || vport->info.qos) {
+               if (vst_mode_steering)
+                       vst_push_cvlan = true;
+               else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always))
+                       vst_check_cvlan = true;
+       }
+
+       if (vst_check_cvlan || vport->info.spoofchk)
+               spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+       /* Create ingress allow rule */
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+       if (vst_push_cvlan) {
+               flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+               flow_act.vlan[0].prio = vport->info.qos;
+               flow_act.vlan[0].vid = vport->info.vlan;
+               flow_act.vlan[0].ethtype = ETH_P_8021Q;
+       }
+
+       if (vst_check_cvlan)
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
 
@@ -218,9 +240,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                ether_addr_copy(smac_v, vport->info.mac);
        }
 
-       /* Create ingress allow rule */
-       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
                                                        &flow_act, NULL, 0);
        if (IS_ERR(vport->ingress.allow_rule)) {
@@ -232,6 +251,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                goto out;
        }
 
+       if (!vst_check_cvlan && !vport->info.spoofchk)
+               goto out;
+
        memset(&flow_act, 0, sizeof(flow_act));
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
        /* Attach drop flow counter */
@@ -257,7 +279,8 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
        return 0;
 
 out:
-       esw_acl_ingress_lgcy_cleanup(esw, vport);
+       if (err)
+               esw_acl_ingress_lgcy_cleanup(esw, vport);
        kvfree(spec);
        return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 51a8cec..2b92780 100644
@@ -160,10 +160,17 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
                         esw_vport_context.vport_cvlan_strip, 1);
 
        if (set_flags & SET_VLAN_INSERT) {
-               /* insert only if no vlan in packet */
-               MLX5_SET(modify_esw_vport_context_in, in,
-                        esw_vport_context.vport_cvlan_insert, 1);
-
+               if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
+                       /* insert whether or not a vlan is present */
+                       MLX5_SET(modify_esw_vport_context_in, in,
+                                esw_vport_context.vport_cvlan_insert,
+                                MLX5_VPORT_CVLAN_INSERT_ALWAYS);
+               } else {
+                       /* insert only if no vlan in packet */
+                       MLX5_SET(modify_esw_vport_context_in, in,
+                                esw_vport_context.vport_cvlan_insert,
+                                MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
+               }
                MLX5_SET(modify_esw_vport_context_in, in,
                         esw_vport_context.cvlan_pcp, qos);
                MLX5_SET(modify_esw_vport_context_in, in,
@@ -773,6 +780,7 @@ static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
 
 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        u16 vport_num = vport->vport;
        int flags;
        int err;
@@ -802,8 +810,9 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 
        flags = (vport->info.vlan || vport->info.qos) ?
                SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
-       modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
-                              vport->info.qos, flags);
+       if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
+               modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
+                                      vport->info.qos, flags);
 
        return 0;
 }
@@ -1846,6 +1855,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags)
 {
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        int err = 0;
 
        if (IS_ERR(evport))
@@ -1853,9 +1863,11 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        if (vlan > 4095 || qos > 7)
                return -EINVAL;
 
-       err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
-       if (err)
-               return err;
+       if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
+               err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
+               if (err)
+                       return err;
+       }
 
        evport->info.vlan = vlan;
        evport->info.qos = qos;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2c74441..0e2c9e6 100644
@@ -505,6 +505,12 @@ static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
        return esw->qos.enabled;
 }
 
+static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
+{
+       return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
+               MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
+}
+
 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
                                                       u8 vlan_depth)
 {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 66eaf0a..3e72133 100644
@@ -1075,6 +1075,11 @@ enum {
 };
 
 enum {
+       MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN  = 0x1,
+       MLX5_VPORT_CVLAN_INSERT_ALWAYS         = 0x3,
+};
+
+enum {
        MLX5_L3_PROT_TYPE_IPV4          = 0,
        MLX5_L3_PROT_TYPE_IPV6          = 1,
 };
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index cd9d1c9..49ea000 100644
@@ -822,7 +822,8 @@ struct mlx5_ifc_e_switch_cap_bits {
        u8         vport_svlan_insert[0x1];
        u8         vport_cvlan_insert_if_not_exist[0x1];
        u8         vport_cvlan_insert_overwrite[0x1];
-       u8         reserved_at_5[0x2];
+       u8         reserved_at_5[0x1];
+       u8         vport_cvlan_insert_always[0x1];
        u8         esw_shared_ingress_acl[0x1];
        u8         esw_uplink_ingress_acl[0x1];
        u8         root_ft_on_other_esw[0x1];