struct mlxsw_afk_key_info *key_info;
struct mlxsw_sp_acl_tcam *tcam;
struct mlxsw_sp_acl_tcam_vgroup *vgroup;
- struct delayed_work rehash_dw;
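+ /* Periodic vregion rehash state */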
+ struct {
+ struct delayed_work dw;
+ } rehash;
struct mlxsw_sp *mlxsw_sp;
bool failed_rollback; /* Indicates failed rollback during migration */
unsigned int ref_count;
if (!interval)
return;
- mlxsw_core_schedule_dw(&vregion->rehash_dw,
+ mlxsw_core_schedule_dw(&vregion->rehash.dw,
msecs_to_jiffies(interval));
}
static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
struct mlxsw_sp_acl_tcam_vregion *vregion =
container_of(work, struct mlxsw_sp_acl_tcam_vregion,
- rehash_dw.work);
+ rehash.dw.work);
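/* Rehash the vregion, then re-arm the delayed work for the next interval */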
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion);
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
/* Create the delayed work for vregion periodic rehash */
- INIT_DELAYED_WORK(&vregion->rehash_dw,
+ INIT_DELAYED_WORK(&vregion->rehash.dw,
mlxsw_sp_acl_tcam_vregion_rehash_work);
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
mutex_lock(&tcam->lock);
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash_dw);
+ cancel_delayed_work_sync(&vregion->rehash.dw);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
mutex_lock(&tcam->lock);
list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
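/* A non-zero interval kicks off rehash immediately; zero cancels pending work */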
if (val)
- mlxsw_core_schedule_dw(&vregion->rehash_dw, 0);
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
else
- cancel_delayed_work_sync(&vregion->rehash_dw);
+ cancel_delayed_work_sync(&vregion->rehash.dw);
}
mutex_unlock(&tcam->lock);
return 0;