A read-write single value file which exists on non-root cgroups.
The default is "0", i.e. no bandwidth boosting.
- The minimum utilization in the range [0, 1023].
+ The requested minimum utilization in the range [0, 1023].
This interface allows reading and setting minimum utilization clamp
values, similarly to sched_setattr(2). This minimum utilization
value is used to clamp the task-specific minimum utilization clamp.
+ cpu.util.min.effective
+ A read-only single value file which exists on non-root cgroups and
+ reports the minimum utilization clamp value currently enforced on a
+ task group.
+
+ The actual minimum utilization in the range [0, 1023].
+
+ This value can be lower than cpu.util.min in case a parent cgroup
+ is enforcing a more restrictive clamping on minimum utilization.
+
cpu.util.max
A read-write single value file which exists on non-root cgroups.
The default is "1023". i.e. no bandwidth clamping
- The maximum utilization in the range [0, 1023].
+ The requested maximum utilization in the range [0, 1023].
This interface allows reading and setting maximum utilization clamp
values, similarly to sched_setattr(2). This maximum utilization
value is used to clamp the task-specific maximum utilization clamp.
+ cpu.util.max.effective
+ A read-only single value file which exists on non-root cgroups and
+ reports the maximum utilization clamp value currently enforced on a
+ task group.
+
+ The actual maximum utilization in the range [0, 1023].
+
+ This value can be lower than cpu.util.max in case a parent cgroup
+ is enforcing a more restrictive clamping on maximum utilization.
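For concreteness, a minimal userspace sketch of the interface described above follows. It is illustrative only: the /sys/fs/cgroup mount point and the "parent"/"parent/child" group names are assumptions about the local setup, and the kernel must be built with CONFIG_UCLAMP_TASK_GROUP.

/*
 * Illustrative only: paths assume a cgroup v2 hierarchy mounted at
 * /sys/fs/cgroup with "parent" and "parent/child" groups already created.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, val, strlen(val)) < 0)
		perror(path);
	if (fd >= 0)
		close(fd);
}

static void read_print(const char *path)
{
	char buf[32] = { 0 };
	int fd = open(path, O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf) - 1) < 0)
		perror(path);
	else
		printf("%s: %s", path, buf);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	/* The parent allows boosting up to 200; the child requests 500. */
	write_str("/sys/fs/cgroup/parent/cpu.util.min", "200");
	write_str("/sys/fs/cgroup/parent/child/cpu.util.min", "500");

	/* The child's requested value is preserved... */
	read_print("/sys/fs/cgroup/parent/child/cpu.util.min");
	/* ...while the enforced value is capped by the parent. */
	read_print("/sys/fs/cgroup/parent/child/cpu.util.min.effective");

	return 0;
}

After creating the two groups, the child's cpu.util.min should read back as 500 while cpu.util.min.effective reads 200, since the parent's smaller boost is the most restrictive one.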
+
+
Memory
------
uc_se = &root_task_group.uclamp[clamp_id];
uc_se->value = uclamp_none(clamp_id);
uc_se->group_id = group_id;
+ uc_se->effective.value = uclamp_none(clamp_id);
+ uc_se->effective.group_id = group_id;
/* Attach root TG's clamp group */
uc_map[group_id].se_count = 1;
uc_se->value = parent->uclamp[clamp_id].value;
uc_se->group_id = UCLAMP_NOT_VALID;
+ uc_se->effective.value =
+ parent->uclamp[clamp_id].effective.value;
+ uc_se->effective.group_id =
+ parent->uclamp[clamp_id].effective.group_id;
}
return 1;
}
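The hunks above read and write an "effective" sub-structure whose definition is not part of this excerpt. The following is an inferred sketch of that layout, reconstructed from the field accesses above rather than taken from the patch itself:

/*
 * Inferred sketch, not the patch's actual definition: each task group
 * keeps, per clamp index, both the value requested via the cgroup file
 * and the effective value enforced after hierarchical clamping.
 */
struct uclamp_se {
	unsigned int value;		/* requested: cpu.util.{min,max} */
	unsigned int group_id;		/* clamp group refcounted by this TG */
	struct {
		unsigned int value;	/* enforced: cpu.util.{min,max}.effective */
		unsigned int group_id;
	} effective;
};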
#ifdef CONFIG_UCLAMP_TASK_GROUP
+static void cpu_util_update_hier(struct cgroup_subsys_state *css,
+ int clamp_id, int value)
+{
+ struct cgroup_subsys_state *top_css = css;
+ struct uclamp_se *uc_se, *uc_parent;
+
+ css_for_each_descendant_pre(css, top_css) {
+ /*
+ * The first visited task group is top_css, whose clamp value
+ * is the one passed as a parameter. For descendant task
+ * groups we consider their current value.
+ */
+ uc_se = &css_tg(css)->uclamp[clamp_id];
+ if (css != top_css)
+ value = uc_se->value;
+ /*
+ * Skip the whole subtree if the current effective clamp already
+ * matches the TG's clamp value.
+ * In this case, all its descendants already have this value, or a
+ * more restrictive one, as their effective clamp.
+ */
+ uc_parent = &css_tg(css)->parent->uclamp[clamp_id];
+ if (uc_se->effective.value == value &&
+ uc_parent->effective.value >= value) {
+ css = css_rightmost_descendant(css);
+ continue;
+ }
+
+ /* Propagate the most restrictive effective value */
+ if (uc_parent->effective.value < value)
+ value = uc_parent->effective.value;
+ if (uc_se->effective.value == value)
+ continue;
+
+ uc_se->effective.value = value;
+ }
+}
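To make the propagation rule concrete, here is a standalone toy replay of it: each group's effective value is the minimum of its requested value and its parent's effective value, evaluated parents-first. The struct tg below is a stand-in invented for illustration (not the kernel's task_group), and the subtree-skipping optimization is omitted:

#include <stdio.h>

struct tg {
	unsigned int value;		/* requested clamp */
	unsigned int effective;		/* enforced clamp */
	struct tg *parent;
};

/*
 * Pre-order stand-in for cpu_util_update_hier(): tgs[] lists parents
 * before children, mirroring css_for_each_descendant_pre().
 */
static void update_hier(struct tg **tgs, int ntgs)
{
	for (int i = 0; i < ntgs; i++) {
		unsigned int value = tgs[i]->value;

		/* Propagate the most restrictive effective value */
		if (tgs[i]->parent && tgs[i]->parent->effective < value)
			value = tgs[i]->parent->effective;
		tgs[i]->effective = value;
	}
}

int main(void)
{
	struct tg root = { .value = 800 };
	struct tg mid  = { .value = 500, .parent = &root };
	struct tg leaf = { .value = 700, .parent = &mid };
	struct tg *tgs[] = { &root, &mid, &leaf };

	update_hier(tgs, 3);

	/*
	 * Prints 800->800, 500->500, 700->500: the leaf's request is
	 * clamped by the more restrictive effective value of "mid".
	 */
	for (int i = 0; i < 3; i++)
		printf("requested %u -> effective %u\n",
		       tgs[i]->value, tgs[i]->effective);
	return 0;
}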
+
static int cpu_util_min_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 min_value)
{
if (tg->uclamp[UCLAMP_MAX].value < min_value)
goto out;
+ /* Update effective clamps to track the most restrictive value */
+ cpu_util_update_hier(css, UCLAMP_MIN, min_value);
+
out:
rcu_read_unlock();
mutex_unlock(&uclamp_mutex);
if (tg->uclamp[UCLAMP_MIN].value > max_value)
goto out;
+ /* Update effective clamps to track the most restrictive value */
+ cpu_util_update_hier(css, UCLAMP_MAX, max_value);
+
out:
rcu_read_unlock();
mutex_unlock(&uclamp_mutex);
}
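The two write handlers appear above only as fragments; declarations, input validation, and the clamp-group refcount update are elided. Purely as a hedged sketch, the overall shape of cpu_util_min_write_u64() might look as follows, with the elided bookkeeping reduced to a comment and the range check assumed:

static int cpu_util_min_write_u64(struct cgroup_subsys_state *css,
				  struct cftype *cftype, u64 min_value)
{
	struct task_group *tg;
	int ret = 0;

	/* Assumed range check: clamp values live in [0, 1023] */
	if (min_value > 1023)
		return -ERANGE;

	mutex_lock(&uclamp_mutex);
	rcu_read_lock();

	tg = css_tg(css);
	if (tg->uclamp[UCLAMP_MIN].value == min_value)
		goto out;
	/* A minimum clamp above the maximum clamp is inconsistent */
	if (tg->uclamp[UCLAMP_MAX].value < min_value) {
		ret = -EINVAL;
		goto out;
	}

	/* ... update tg->uclamp[UCLAMP_MIN] and its clamp group here ... */

	/* Update effective clamps to track the most restrictive value */
	cpu_util_update_hier(css, UCLAMP_MIN, min_value);

out:
	rcu_read_unlock();
	mutex_unlock(&uclamp_mutex);

	return ret;
}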
static inline u64 cpu_uclamp_read(struct cgroup_subsys_state *css,
- enum uclamp_id clamp_id)
+ enum uclamp_id clamp_id,
+ bool effective)
{
struct task_group *tg;
u64 util_clamp;
rcu_read_lock();
tg = css_tg(css);
- util_clamp = tg->uclamp[clamp_id].value;
+ util_clamp = effective
+ ? tg->uclamp[clamp_id].effective.value
+ : tg->uclamp[clamp_id].value;
rcu_read_unlock();
return util_clamp;
static u64 cpu_util_min_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- return cpu_uclamp_read(css, UCLAMP_MIN);
+ return cpu_uclamp_read(css, UCLAMP_MIN, false);
}
static u64 cpu_util_max_read_u64(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- return cpu_uclamp_read(css, UCLAMP_MAX);
+ return cpu_uclamp_read(css, UCLAMP_MAX, false);
+}
+
+static u64 cpu_util_min_effective_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ return cpu_uclamp_read(css, UCLAMP_MIN, true);
+}
+
+static u64 cpu_util_max_effective_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ return cpu_uclamp_read(css, UCLAMP_MAX, true);
}
#endif /* CONFIG_UCLAMP_TASK_GROUP */
.read_u64 = cpu_util_min_read_u64,
.write_u64 = cpu_util_min_write_u64,
},
+ {
+ .name = "util.min.effective",
+ .read_u64 = cpu_util_min_effective_read_u64,
+ },
{
.name = "util.max",
.read_u64 = cpu_util_max_read_u64,
.write_u64 = cpu_util_max_write_u64,
},
+ {
+ .name = "util.max.effective",
+ .read_u64 = cpu_util_max_effective_read_u64,
+ },
#endif
{ } /* Terminate */
};