 	throttling_attr.size = sizeof(struct sched_attr);
 	throttling_attr.sched_nice = cpu_nice;
 	switch (cpu_sched_type) {
-	case CPU_SCHED_OTHER:
-		throttling_attr.sched_policy = SCHED_OTHER;
-		break;
 	case CPU_SCHED_IDLE:
 		throttling_attr.sched_policy = SCHED_IDLE;
 		break;
 	case CPU_SCHED_BATCH:
 		throttling_attr.sched_policy = SCHED_BATCH;
 		break;
+	case CPU_SCHED_OTHER:
+		throttling_attr.sched_policy = SCHED_OTHER;
+		break;
 	default:
 		if (!skip_share || !skip_bandwidth)
 			rmdir(CPUCG_THROTTLING_PATH);
 		ret = check_oom_and_set_limit(memcg, memcg_limit_bytes);
 	else {
 		/* If the group is empty don't set the limit to enable adding processes. */
-		if (memcg_memsw_is_supported())
+		if (memcg_memsw_is_supported()) {
 			ret = cgroup_write_node_int32(memcg, MEMCG_SWAP_LIMIT_BYTE, -1);
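+			/* bail out so the memsw write failure is not overwritten by the memory limit write below */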
+			if (ret != RESOURCED_ERROR_NONE)
+				goto hard_limit_out;
+		}
 		ret = cgroup_write_node_int32(memcg, MEMCG_LIMIT_BYTE, -1);
 	}
+hard_limit_out:
 	if (ret != RESOURCED_ERROR_NONE)
 		_E("[SWAP] Not able to set hard limit of %s memory cgroup", memcg);
 	return RESOURCED_ERROR_NONE;
 	if (mi->limit_ratio == MEMCG_NO_LIMIT) {
-		if (memcg_memsw_is_supported())
+		if (memcg_memsw_is_supported()) {
 			ret = cgroup_write_node_int32(mi->name, MEMCG_SWAP_LIMIT_BYTE, -1);
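+			/* keep the memsw error instead of overwriting it with the memory limit write below */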
+			if (ret != RESOURCED_ERROR_NONE)
+				goto limit_update_out;
+		}
 		ret = cgroup_write_node_int32(mi->name, MEMCG_LIMIT_BYTE, -1);
 	}
 	else
 		ret = check_oom_and_set_limit(mi->name, mi->limit_bytes);
+
+limit_update_out:
 	if (ret != RESOURCED_ERROR_NONE)
 		_E("[SWAP] Failed to change hard limit of %s cgroup to -1", mi->name);
 	else