}
}
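+/* Reset EC VF vports to defaults: clear QoS and admin info and restore the
+ * AUTO link state. EC VF counterpart of mlx5_eswitch_clear_vf_vports_info().
+ */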
+static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
+ memset(&vport->qos, 0, sizeof(vport->qos));
+ memset(&vport->info, 0, sizeof(vport->info));
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ }
+}
+
/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
}
}
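+/* Unload every EC VF vport that is currently enabled. EC VF counterpart of
+ * mlx5_eswitch_unload_vf_vports().
+ */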
+static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
+ u16 num_ec_vfs)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
+ if (!vport->enabled)
+ continue;
+ mlx5_eswitch_unload_vport(esw, vport->vport);
+ }
+}
+
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
return err;
}
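+/* Load up to num_ec_vfs EC VF vports; on failure unwind any vports that were
+ * already loaded.
+ */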
+static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_vfs,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
+ err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
+ if (err)
+ goto vf_err;
+ }
+
+ return 0;
+
+vf_err:
+ mlx5_eswitch_unload_ec_vf_vports(esw, num_ec_vfs);
+ return err;
+}
+
static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_ecpf(dev))
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
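+ /* With EC SRIOV enabled, bring up the EC VF vports right after the ECPF vport */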
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
+ }
}
/* Enable VF vports */
return 0;
vf_err:
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
+ec_vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
{
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
- if (mlx5_ecpf_vport_exists(esw->dev))
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+ }
host_pf_disable_hca(esw->dev);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_num_of_vfs);
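+ /* The firmware-reported value above covers host VFs only; with EC SRIOV the
+ * num_vfs argument is taken as the EC VF count.
+ */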
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ esw->esw_funcs.num_ec_vfs = num_vfs;
+
kvfree(out);
}
mlx5_eswitch_event_handlers_register(esw);
- esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- esw->esw_funcs.num_vfs, esw->enabled_vports);
+ esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
mlx5_esw_mode_change_notify(esw, esw->mode);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
bool toggle_lag;
- int ret;
+ int ret = 0;
if (!mlx5_esw_allowed(esw))
return 0;
vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
- ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
- if (!ret)
- esw->esw_funcs.num_vfs = num_vfs;
+ /* If this is the ECPF, the number of host VFs is managed via the
+ * eswitch function change event handler, and any num_vfs provided
+ * here is the number of EC VFs.
+ */
+ if (!mlx5_core_is_ecpf(esw->dev)) {
+ ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
+ if (!ret)
+ esw->esw_funcs.num_vfs = num_vfs;
+ } else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw, num_vfs, vport_events);
+ if (!ret)
+ esw->esw_funcs.num_ec_vfs = num_vfs;
+ }
}
+
up_write(&esw->mode_lock);
if (toggle_lag)
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
*/
- if (!esw->esw_funcs.num_vfs && !clear_vf)
+ if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
goto unlock;
- esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- esw->esw_funcs.num_vfs, esw->enabled_vports);
-
- mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
- if (clear_vf)
- mlx5_eswitch_clear_vf_vports_info(esw);
+ esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
+
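+ /* On the ECPF the SRIOV VFs are EC VFs, so unload and clear those instead of
+ * host VF vports.
+ */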
+ if (!mlx5_core_is_ecpf(esw->dev)) {
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ } else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_ec_vf_vports_info(esw);
+ }
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
if (esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_eswitch_disable_locked(esw);
- esw->esw_funcs.num_vfs = 0;
+ if (!mlx5_core_is_ecpf(esw->dev))
+ esw->esw_funcs.num_vfs = 0;
+ else
+ esw->esw_funcs.num_ec_vfs = 0;
unlock:
up_write(&esw->mode_lock);
mlx5_eswitch_event_handlers_unregister(esw);
- esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- esw->esw_funcs.num_vfs, esw->enabled_vports);
+ esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
idx++;
}
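+ /* EC VF vport numbers are contiguous, starting at the device's EC VF vport base */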
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ int ec_vf_base_num = mlx5_core_ec_vf_vport_base(dev);
+
+ for (i = 0; i < mlx5_core_max_ec_vfs(esw->dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, idx, ec_vf_base_num + i);
+ if (err)
+ goto err;
+ idx++;
+ }
+ }
+
if (mlx5_ecpf_vport_exists(dev) ||
mlx5_core_is_ecpf_esw_manager(dev)) {
err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_ECPF);
/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
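+ /* EC VF reps likewise control the link state of their vports */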
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
/* Uplink vport rep must load first. */
err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
goto revert_inline_mode;
}
}
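+ /* Apply the requested min inline mode to the EC VF vports as well */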
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
+ err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
+ if (err) {
+ err_vport_num = vport->vport;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set min inline on vport");
+ goto revert_ec_vf_inline_mode;
+ }
+ }
+ }
return 0;
+revert_ec_vf_inline_mode:
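+ /* Roll back only the EC VF vports whose min inline mode was already changed */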
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
+ if (vport->vport == err_vport_num)
+ break;
+ mlx5_modify_nic_vport_min_inline(dev,
+ vport->vport,
+ esw->offloads.inline_mode);
+ }
revert_inline_mode:
mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
if (vport->vport == err_vport_num)