static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
{
- u32 contrib;
int cpu = -1; /* not used in normal case */
#ifdef CONFIG_HMP_FREQUENCY_INVARIANT_SCALE
cpu = cpu_of(rq);
#endif
__update_entity_runnable_avg(rq_clock_task(rq), cpu, &rq->avg, runnable,
runnable);
__update_tg_runnable_avg(&rq->avg, &rq->cfs);
- contrib = rq->avg.runnable_avg_sum * scale_load_down(1024);
- contrib /= (rq->avg.avg_period + 1);
- trace_sched_rq_runnable_ratio(cpu_of(rq), scale_load(contrib));
+ trace_sched_rq_runnable_ratio(cpu_of(rq), rq->avg.load_avg_ratio);
trace_sched_rq_runnable_load(cpu_of(rq), rq->cfs.runnable_load_avg);
}
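Note: rq->avg.load_avg_ratio is assumed to already carry, pre-computed, the same scaled ratio the removed lines derived by hand from runnable_avg_sum and avg_period. A minimal sketch of how that field would be kept up to date, assuming it is filled in next to load_avg_contrib (e.g. in __update_task_entity_contrib(), outside this excerpt):

	/* sketch: runnable ratio scaled to NICE_0_LOAD, i.e. roughly 0..1024 */
	contrib = se->avg.runnable_avg_sum * scale_load_down(NICE_0_LOAD);
	contrib /= (se->avg.avg_period + 1);
	se->avg.load_avg_ratio = scale_load(contrib);

With the field maintained there, the trace point can report the cached value directly instead of re-deriving it on every call.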
#else /* CONFIG_FAIR_GROUP_SCHED */
}
/* Compute the current contribution to load_avg by se, return any delta */
-static long __update_entity_load_avg_contrib(struct sched_entity *se)
+static long __update_entity_load_avg_contrib(struct sched_entity *se, long *ratio)
{
long old_contrib = se->avg.load_avg_contrib;
+ long old_ratio = se->avg.load_avg_ratio;
if (entity_is_task(se)) {
__update_task_entity_contrib(se);
} else {
__update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
__update_group_entity_contrib(se);
}
+ if (ratio)
+ *ratio = se->avg.load_avg_ratio - old_ratio;
return se->avg.load_avg_contrib - old_contrib;
}
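The new ratio out-parameter mirrors the existing contrib delta: it reports how much se->avg.load_avg_ratio moved during this update, so the caller can fold the same delta into the per-rq aggregate (rq_of(cfs_rq)->avg.load_avg_ratio, used below) instead of recomputing the ratio.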
static inline void update_entity_load_avg(struct sched_entity *se,
int update_cfs_rq)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- long contrib_delta, utilization_delta;
+ long contrib_delta, utilization_delta, ratio_delta;
u64 now;
int cpu = -1; /* not used in normal case */
if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
cfs_rq->curr == se))
return;
- contrib_delta = __update_entity_load_avg_contrib(se);
+ contrib_delta = __update_entity_load_avg_contrib(se, &ratio_delta);
utilization_delta = __update_entity_utilization_avg_contrib(se);
if (!update_cfs_rq)
return;
if (se->on_rq) {
cfs_rq->runnable_load_avg += contrib_delta;
cfs_rq->utilization_load_avg += utilization_delta;
+ rq_of(cfs_rq)->avg.load_avg_ratio += ratio_delta;
} else {
subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
}
cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib;
+ rq_of(cfs_rq)->avg.load_avg_ratio += se->avg.load_avg_ratio;
/* we force update consideration on load-balancer moves */
update_cfs_rq_blocked_load(cfs_rq, !wakeup);
}
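Enqueue folds the entity's current load_avg_ratio into the rq-wide total, and the dequeue path below removes the same amount, so rq->avg.load_avg_ratio presumably stays equal to the sum of the ratios of the entities currently queued on that rq.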
cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib;
+ rq_of(cfs_rq)->avg.load_avg_ratio -= se->avg.load_avg_ratio;
if (sleep) {
cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
unsigned long contrib, scaled_contrib;
struct sched_avg *avg;
- for_each_cpu_mask(cpu, hmpd->cpus) {
+ for_each_cpu(cpu, &hmpd->cpus) {
avg = &cpu_rq(cpu)->avg;
/* used for both up and down migration */
curr_last_migration = avg->hmp_last_up_migration ?
avg->hmp_last_up_migration : avg->hmp_last_down_migration;
/* don't use the divisor in the loop, just at the end */
- contrib = avg->runnable_avg_sum * scale_load_down(1024);
- scaled_contrib = contrib >> 22;
+ contrib = avg->load_avg_ratio * scale_load_down(1024);
+ scaled_contrib = contrib >> 13;
if ((contrib < min_runnable_load) ||
(scaled_contrib == scaled_min_runnable_load &&
*min_cpu = min_cpu_runnable_temp;
/* domain will often have at least one empty CPU */
- return min_runnable_load ? min_runnable_load / (LOAD_AVG_MAX + 1) : 0;
+ trace_printk("hmp_domain_min_load returning %lu\n",
+ min_runnable_load > 1023 ? 1023 : min_runnable_load);
+ return min_runnable_load > 1023 ? 1023 : min_runnable_load;
}
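For scale: with load_avg_ratio derived from runnable_avg_sum / avg_period and capped at roughly 1024 (NICE_0_LOAD resolution aside), contrib here stays below about 1024 * 1024 = 2^20, so the >> 13 shift leaves a coarse 0..128 bucket that is only used to decide when two CPUs count as equally loaded and the last-migration tie-break applies; a fully busy CPU would give ratio ~1024, contrib ~1048576, scaled_contrib ~128. The value hmp_domain_min_load() returns is then simply clamped to 0..1023 instead of being divided down by LOAD_AVG_MAX + 1 as before.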
/*