net/mlx5e: TC, Extract indr setup block checks to function
author Maor Dickman <maord@nvidia.com>
Tue, 14 Mar 2023 05:42:32 +0000 (22:42 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 16 Mar 2023 05:12:09 +0000 (22:12 -0700)
In preparation for the next patch, which will add a new check
of whether a device block can be set up, extract all existing
checks into a function to make the code more readable and
maintainable.

Signed-off-by: Maor Dickman <maord@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20230314054234.267365-14-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c

index 8f7452d..b4af006 100644
@@ -426,39 +426,53 @@ static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
        return macvlan->mode == MACVLAN_MODE_PASSTHRU;
 }
 
-static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
-                          struct mlx5e_rep_priv *rpriv,
-                          struct flow_block_offload *f,
-                          flow_setup_cb_t *setup_cb,
-                          void *data,
-                          void (*cleanup)(struct flow_block_cb *block_cb))
+static bool
+mlx5e_rep_check_indr_block_supported(struct mlx5e_rep_priv *rpriv,
+                                    struct net_device *netdev,
+                                    struct flow_block_offload *f)
 {
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       bool is_ovs_int_port = netif_is_ovs_master(netdev);
-       struct mlx5e_rep_indr_block_priv *indr_priv;
-       struct flow_block_cb *block_cb;
 
-       if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
-           !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
-           !is_ovs_int_port) {
-               if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
-                       return -EOPNOTSUPP;
+       if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+           f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+               return false;
+
+       if (mlx5e_tc_tun_device_to_offload(priv, netdev))
+               return true;
+
+       if (is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)
+               return true;
+
+       if (netif_is_macvlan(netdev)) {
                if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
                        netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
-                       return -EOPNOTSUPP;
+                       return false;
                }
+
+               if (macvlan_dev_real_dev(netdev) == rpriv->netdev)
+                       return true;
        }
 
-       if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
-           f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
-               return -EOPNOTSUPP;
+       if (netif_is_ovs_master(netdev) && f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+           mlx5e_tc_int_port_supported(esw))
+               return true;
 
-       if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
-               return -EOPNOTSUPP;
+       return false;
+}
+
+static int
+mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
+                          struct mlx5e_rep_priv *rpriv,
+                          struct flow_block_offload *f,
+                          flow_setup_cb_t *setup_cb,
+                          void *data,
+                          void (*cleanup)(struct flow_block_cb *block_cb))
+{
+       struct mlx5e_rep_indr_block_priv *indr_priv;
+       struct flow_block_cb *block_cb;
 
-       if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
+       if (!mlx5e_rep_check_indr_block_supported(rpriv, netdev, f))
                return -EOPNOTSUPP;
 
        f->unlocked_driver_cb = true;
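
To illustrate the stated motivation, a follow-up check now only needs one more
early-return clause inside mlx5e_rep_check_indr_block_supported(); the call site
in mlx5e_rep_indr_setup_block() stays untouched. The fragment below is a
hypothetical sketch, not part of this patch: netif_is_lag_master() is an
existing netdevice helper, but mlx5e_rep_indr_block_lag_supported() is an
invented placeholder for whatever capability test a later patch would actually
perform.

	/* Hypothetical follow-up (not in this patch): accepting another
	 * upper-device type is a single clause added just before the
	 * final "return false;" of mlx5e_rep_check_indr_block_supported().
	 * mlx5e_rep_indr_block_lag_supported() is a placeholder name.
	 */
	if (netif_is_lag_master(netdev) &&
	    mlx5e_rep_indr_block_lag_supported(rpriv, netdev))
		return true;

	return false;
}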