{
struct rq *rq = cpu_rq(cpu);
unsigned long util, irq, max;
+ unsigned long util_rt;
max = arch_scale_cpu_capacity(NULL, cpu);
* utilization (PELT windows are synchronized) we can directly add them
* to obtain the CPU's actual utilization.
*/
-
+ util_rt = cpu_util_rt(rq);
if (type == frequency_util) {
/*
- * CFS utilization can be boosted or capped, depending on utilization
- * clamp constraints configured for currently RUNNABLE tasks.
+ * CFS and RT utilizations can be boosted or capped, depending on
+ * utilization constraints enforced by currently RUNNABLE tasks.
+ * They are individually clamped to ensure fairness across classes,
+ * meaning that CFS always gets (if possible) the (minimum) required
+ * bandwidth on top of that required by higher priority classes.
*/
- util = util_cfs ? uclamp_util(cpu, util_cfs) : 0;
- util += cpu_util_rt(rq);
+ if (sched_feat(UCLAMP_SCHED_CLASS)) {
+ util = 0;
+ if (util_cfs)
+ util += uclamp_util(cpu, util_cfs);
+ if (util_rt)
+ util += uclamp_util(cpu, util_rt);
+ } else {
+ util = util_cfs + util_rt;
+ util = uclamp_util(cpu, util);
+ }
/*
* For frequency selection we do not make cpu_util_dl() a
return max;
} else {
- util = util_cfs + cpu_util_rt(rq);
+ util = util_cfs + util_rt;
/*
* OTOH, for energy computation we need the estimated
*
* Since DL tasks have a much more advanced bandwidth control, it's
* safe to assume that IO boost does not apply to those tasks.
- * Instead, since RT tasks are currently not utiliation clamped,
- * we don't want to apply clamping on IO boost while there is
- * blocked RT utilization.
+ * Instead, for CFS and RT tasks we clamp the IO boost max value
+ * considering the current constraints for the CPU.
*/
max_boost = sg_cpu->iowait_boost_max;
- if (!cpu_util_rt(cpu_rq(sg_cpu->cpu)))
- max_boost = uclamp_util(sg_cpu->cpu, max_boost);
+ max_boost = uclamp_util(sg_cpu->cpu, max_boost);
/* Double the boost at each request */
if (sg_cpu->iowait_boost) {