* based on the task model parameters and gives the minimal utilization
* required to meet deadlines.
*/
-static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
+				  enum schedutil_type type)
{
-	struct rq *rq = cpu_rq(sg_cpu->cpu);
+	struct rq *rq = cpu_rq(cpu);
 	unsigned long util, irq, max;
 
-	sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
-	sg_cpu->bw_dl = cpu_bw_dl(rq);
+	max = arch_scale_cpu_capacity(NULL, cpu);
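 
+	/*
+	 * For frequency selection, any runnable RT task sends the request
+	 * straight to f_max; the energy estimate instead uses the RT
+	 * class's tracked utilization, added below.
+	 */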
-	if (rt_rq_is_runnable(&rq->rt))
+	if (type == frequency_util && rt_rq_is_runnable(&rq->rt))
 		return max;
 
 	/*
 	 * Early check to see if IRQ/steal time saturates the CPU; can be due
 	 * to inaccuracies in how we track these -- see update_irq_load_avg().
 	 */
 	irq = cpu_util_irq(rq);
 	if (unlikely(irq >= max))
 		return max;
 
 	/*
 	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
 	 * CFS tasks and we use the same metric to track the effective
 	 * utilization (PELT windows are synchronized) we can directly add them
 	 * to obtain the CPU's actual utilization.
 	 */
-	util = cpu_util_cfs(rq);
+	util = util_cfs;
 	util += cpu_util_rt(rq);
 
-	/*
-	 * We do not make cpu_util_dl() a permanent part of this sum because we
-	 * want to use cpu_bw_dl() later on, but we need to check if the
-	 * CFS+RT+DL sum is saturated (ie. no idle time) such that we select
-	 * f_max when there is no idle time.
-	 *
-	 * NOTE: numerical errors or stop class might cause us to not quite hit
-	 * saturation when we should -- something for later.
-	 */
-	if ((util + cpu_util_dl(rq)) >= max)
-		return max;
+	if (type == frequency_util) {
+		/*
+		 * For frequency selection we do not make cpu_util_dl() a
+		 * permanent part of this sum because we want to use
+		 * cpu_bw_dl() later on, but we need to check if the
+		 * CFS+RT+DL sum is saturated (ie. no idle time) such
+		 * that we select f_max when there is no idle time.
+		 *
+		 * NOTE: numerical errors or stop class might cause us
+		 * to not quite hit saturation when we should --
+		 * something for later.
+		 */
+
+		if ((util + cpu_util_dl(rq)) >= max)
+			return max;
+	} else {
+		/*
+		 * OTOH, for energy computation we need the estimated
+		 * running time, so include util_dl and ignore dl_bw.
+		 */
+		util += cpu_util_dl(rq);
+		if (util >= max)
+			return max;
+	}
 
 	/*
 	 * There is still idle time; further improve the number by using the
 	 * irq metric. Because IRQ/steal time is hidden from CFS, scale the
 	 * aggregated utilization into the capacity that remains after IRQ:
 	 *
 	 *              max - irq
 	 *   U' = irq + --------- * U
 	 *                 max
 	 */
 	util = scale_irq_capacity(util, irq, max);
 	util += irq;
 
-	/*
-	 * Bandwidth required by DEADLINE must always be granted while, for
-	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
-	 * to gracefully reduce the frequency when no tasks show up for longer
-	 * periods of time.
-	 *
-	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
-	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
-	 * an interface. So, we only do the latter for now.
-	 */
-	return min(max, util + sg_cpu->bw_dl);
+	if (type == frequency_util) {
+		/*
+		 * Bandwidth required by DEADLINE must always be granted
+		 * while, for FAIR and RT, we use blocked utilization of
+		 * IDLE CPUs as a mechanism to gracefully reduce the
+		 * frequency when no tasks show up for longer periods of
+		 * time.
+		 *
+		 * Ideally we would like to set bw_dl as min/guaranteed
+		 * freq and util + bw_dl as requested freq. However,
+		 * cpufreq is not yet ready for such an interface. So,
+		 * we only do the latter for now.
+		 */
+		util += cpu_bw_dl(rq);
+	}
+
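+	/*
+	 * The sum may now exceed the CPU capacity (e.g. once bw_dl is
+	 * added); clamp it, since requesting more than f_max is
+	 * meaningless.
+	 */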
+	return min(max, util);
+}
+
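+/*
+ * Fast-path wrapper for schedutil: cache the CPU capacity and DL
+ * bandwidth on sg_cpu for later use by the governor, then compute the
+ * frequency-oriented utilization for this CPU.
+ */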
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+{
+	struct rq *rq = cpu_rq(sg_cpu->cpu);
+	unsigned long util = cpu_util_cfs(rq);
+
+	sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+	sg_cpu->bw_dl = cpu_bw_dl(rq);
+
+	return schedutil_freq_util(sg_cpu->cpu, util, frequency_util);
}
/**