net/mlx5: Implement devlink port function cmds to control ipsec_packet
authorDima Chumak <dchumak@nvidia.com>
Fri, 25 Aug 2023 06:28:36 +0000 (23:28 -0700)
committerJakub Kicinski <kuba@kernel.org>
Mon, 28 Aug 2023 00:08:45 +0000 (17:08 -0700)
Implement devlink port function commands to enable / disable IPsec
packet offloads. This is used to control the IPsec capability of the
device.

When ipsec_packet is enabled for a VF, it prevents adding IPsec packet
offloads on the PF, because the two cannot be active simultaneously due
to HW constraints. Conversely, if there are any active IPsec packet
offloads on the PF, it's not allowed to enable ipsec_packet on a VF,
until PF IPsec offloads are cleared.

Signed-off-by: Dima Chumak <dchumak@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20230825062836.103744-9-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Documentation/networking/device_drivers/ethernet/mellanox/mlx5/switchdev.rst
drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

index de51e55dcfe3329c806005b00d86efa2410948b5..b617e93d7c2c3973f526cb4e8da9c79c95b994f1 100644 (file)
@@ -200,6 +200,16 @@ IPsec capability enabled, any IPsec offloading is blocked on the PF.
 mlx5 driver support devlink port function attr mechanism to setup ipsec_crypto
 capability. (refer to Documentation/networking/devlink/devlink-port.rst)
 
+IPsec packet capability setup
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+A user who wants mlx5 PCI VFs to be able to perform IPsec packet offloading
+needs to explicitly enable the VF ipsec_packet capability. Enabling IPsec
+capability for VFs is supported starting with ConnectX6dx devices and above.
+When a VF has IPsec capability enabled, any IPsec offloading is blocked on
+the PF.
+
+The mlx5 driver supports the devlink port function attr mechanism to set up the
+ipsec_packet capability. (refer to Documentation/networking/devlink/devlink-port.rst)
+
 SF state setup
 --------------
 
index 12205e913417be13b2ad3649fd9e756c2765964a..d8e739cbcbced12a23b5c43dde27232dd8965285 100644 (file)
@@ -95,6 +95,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
 #ifdef CONFIG_XFRM_OFFLOAD
        .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
        .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+       .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+       .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
 #endif /* CONFIG_XFRM_OFFLOAD */
 };
 
index 187fb5f2d0cb6689fb0714e158c41a6052ca8ee4..da10e04777cf74e5d059a49a348e380d807d32b3 100644 (file)
@@ -37,6 +37,7 @@ free:
 
 enum esw_vport_ipsec_offload {
        MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+       MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
 };
 
 int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
@@ -55,6 +56,7 @@ int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *
 
        if (!ipsec_enabled) {
                vport->info.ipsec_crypto_enabled = false;
+               vport->info.ipsec_packet_enabled = false;
                return 0;
        }
 
@@ -69,6 +71,8 @@ int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *
        hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
        vport->info.ipsec_crypto_enabled =
                MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+       vport->info.ipsec_packet_enabled =
+               MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
 free:
        kvfree(query_cap);
        return err;
@@ -143,6 +147,9 @@ static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport
        case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
                MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
                break;
+       case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+               MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+               break;
        default:
                ret = -EOPNOTSUPP;
                goto free;
@@ -222,15 +229,28 @@ static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5
                err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
                if (err)
                        return err;
-               err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+               err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
                if (err)
                        return err;
+
+               /* The generic ipsec_offload cap can be disabled only if both
+                * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+                */
+               if (!vport->info.ipsec_crypto_enabled &&
+                   !vport->info.ipsec_packet_enabled) {
+                       err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+                       if (err)
+                               return err;
+               }
        }
 
        switch (type) {
        case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
                vport->info.ipsec_crypto_enabled = enable;
                break;
+       case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+               vport->info.ipsec_packet_enabled = enable;
+               break;
        default:
                return -EINVAL;
        }
@@ -301,9 +321,49 @@ free:
        return err;
 }
 
+/* Check whether the ipsec_packet capability can be set for @vport_num.
+ *
+ * On top of the generic VF IPsec offload check and the per-vport
+ * offload check, packet (full) offload also requires the NIC RX flow
+ * table decap capability of the vport's function.
+ *
+ * Return: 0 when supported, -EOPNOTSUPP / -ENOMEM / query error otherwise.
+ */
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+                                              u16 vport_num)
+{
+       int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+       void *hca_cap, *query_cap;
+       int ret;
+
+       if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+               return -EOPNOTSUPP;
+
+       ret = esw_ipsec_offload_supported(dev, vport_num);
+       if (ret)
+               return ret;
+
+       query_cap = kvzalloc(query_sz, GFP_KERNEL);
+       if (!query_cap)
+               return -ENOMEM;
+
+       ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+       if (ret)
+               goto out;
+
+       hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+       /* Packet offload requires decap support in the NIC RX flow table.
+        * No goto needed here: the cleanup label follows immediately, so the
+        * redundant `goto out` the patch originally carried is dropped.
+        */
+       if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap))
+               ret = -EOPNOTSUPP;
+
+out:
+       kvfree(query_cap);
+       return ret;
+}
+
/* Enable/disable the VF ipsec_crypto capability; thin wrapper around the
 * common bytype helper with the crypto offload type.
 */
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                                         bool enable)
{
        return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
                                               MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
}
+
+/* Enable/disable the VF ipsec_packet capability; thin wrapper around the
+ * common bytype helper with the packet (full) offload type.
+ */
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                        bool enable)
+{
+       return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+                                              MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
index 79ae6ad94f556b31d95417c967a0c52135e46eac..6cd7d6497e10996f891774d5406f10dfce2ba889 100644 (file)
@@ -834,7 +834,6 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
        vport->info.mig_enabled = MLX5_GET(cmd_hca_cap_2, hca_caps, migratable);
 
        err = mlx5_esw_ipsec_vf_offload_get(esw->dev, vport);
-
 out_free:
        kfree(query_ctx);
        return err;
@@ -917,7 +916,8 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
        /* Sync with current vport context */
        vport->enabled_events = enabled_events;
        vport->enabled = true;
-       if (vport->vport != MLX5_VPORT_PF && vport->info.ipsec_crypto_enabled)
+       if (vport->vport != MLX5_VPORT_PF &&
+           (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
                esw->enabled_ipsec_vf_count++;
 
        /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@@ -975,7 +975,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
            MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
                mlx5_esw_vport_vhca_id_clear(esw, vport_num);
 
-       if (vport->vport != MLX5_VPORT_PF && vport->info.ipsec_crypto_enabled)
+       if (vport->vport != MLX5_VPORT_PF &&
+           (vport->info.ipsec_crypto_enabled || vport->info.ipsec_packet_enabled))
                esw->enabled_ipsec_vf_count--;
 
        /* We don't assume VFs will cleanup after themselves.
index cde5712aa6977df6c738a3a523a09b75768582a5..37ab66e7b403f1d86d3242e3fdabe7ce5bdcc49a 100644 (file)
@@ -164,6 +164,7 @@ struct mlx5_vport_info {
        u8                      roce_enabled: 1;
        u8                      mig_enabled: 1;
        u8                      ipsec_crypto_enabled: 1;
+       u8                      ipsec_packet_enabled: 1;
 };
 
 /* Vport context events */
@@ -565,6 +566,10 @@ int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_en
                                          struct netlink_ext_ack *extack);
 int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
                                          struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+                                         struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
+                                         struct netlink_ext_ack *extack);
 #endif /* CONFIG_XFRM_OFFLOAD */
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
@@ -872,6 +877,13 @@ int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
                                               u16 vport_num);
 int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                                         bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+                                        bool enable);
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+                                              u16 vport_num);
+void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
+void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
+
 #else  /* CONFIG_MLX5_ESWITCH */
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
index f37d30a096c01d6c0d4f95552bfd8f99cf77f50e..752fb0dfb111b27075cd1ead2688d83bb6d1bac7 100644 (file)
@@ -4451,4 +4451,88 @@ unlock:
        mutex_unlock(&esw->state_lock);
        return err;
 }
+
+/* devlink .port_fn_ipsec_packet_get callback: report the cached
+ * ipsec_packet state of the port's vport (vport->info.ipsec_packet_enabled),
+ * read under esw->state_lock. Fails with -EOPNOTSUPP when the device lacks
+ * VF IPsec offload support or when the vport is not enabled.
+ */
+int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
+                                         struct netlink_ext_ack *extack)
+{
+       struct mlx5_eswitch *esw;
+       struct mlx5_vport *vport;
+       int err = 0;
+
+       esw = mlx5_devlink_eswitch_get(port->devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
+
+       if (!mlx5_esw_ipsec_vf_offload_supported(esw->dev)) {
+               NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet");
+               return -EOPNOTSUPP;
+       }
+
+       vport = mlx5_devlink_port_vport_get(port);
+
+       /* state_lock guards both vport->enabled and the cached capability. */
+       mutex_lock(&esw->state_lock);
+       if (!vport->enabled) {
+               err = -EOPNOTSUPP;
+               goto unlock;
+       }
+
+       *is_enabled = vport->info.ipsec_packet_enabled;
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+/* devlink .port_fn_ipsec_packet_set callback: toggle the VF ipsec_packet
+ * capability. Validates device/vport support first, then updates the FW
+ * capability and the cached state plus the eswitch-wide VF-IPsec counter,
+ * all under esw->state_lock.
+ */
+int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port,
+                                         bool enable,
+                                         struct netlink_ext_ack *extack)
+{
+       struct mlx5_eswitch *esw;
+       struct mlx5_vport *vport;
+       u16 vport_num;
+       int err;
+
+       esw = mlx5_devlink_eswitch_get(port->devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
+
+       vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+       err = mlx5_esw_ipsec_vf_packet_offload_supported(esw->dev, vport_num);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Device doesn't support IPsec packet mode");
+               return err;
+       }
+
+       vport = mlx5_devlink_port_vport_get(port);
+       mutex_lock(&esw->state_lock);
+       if (!vport->enabled) {
+               err = -EOPNOTSUPP;
+               NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+               goto unlock;
+       }
+
+       /* No-op when the requested state matches; err is 0 here. */
+       if (vport->info.ipsec_packet_enabled == enable)
+               goto unlock;
+
+       /* VF IPsec and PF IPsec offloads are mutually exclusive: refuse to
+        * toggle while the PF has active IPsec offloads and no VF currently
+        * holds the capability.
+        */
+       if (!esw->enabled_ipsec_vf_count && esw->dev->num_ipsec_offloads) {
+               err = -EBUSY;
+               goto unlock;
+       }
+
+       err = mlx5_esw_ipsec_vf_packet_offload_set(esw, vport, enable);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Failed to set IPsec packet mode");
+               goto unlock;
+       }
+
+       vport->info.ipsec_packet_enabled = enable;
+       /* Keep the eswitch-wide count of IPsec-capable VFs in sync; it is
+        * also adjusted on vport enable/disable.
+        */
+       if (enable)
+               esw->enabled_ipsec_vf_count++;
+       else
+               esw->enabled_ipsec_vf_count--;
+unlock:
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
 #endif /* CONFIG_XFRM_OFFLOAD */