if (cfs_rq_is_decayed(cfs_rq))
list_del_leaf_cfs_rq(cfs_rq);
}
+#ifdef CONFIG_NO_HZ_COMMON
+ rq->last_blocked_load_update_tick = jiffies;
+#endif
rq_unlock_irqrestore(rq, &rf);
}
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+#ifdef CONFIG_NO_HZ_COMMON
+ rq->last_blocked_load_update_tick = jiffies;
+#endif
rq_unlock_irqrestore(rq, &rf);
}
}
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * Shared NOHZ idle-balancing state: which CPUs are currently nohz-idle
+ * and when the next balance / blocked-load stats update is due.
+ * Moved above update_sd_lb_stats() so it can consult idle_cpus_mask and
+ * push back next_update (definition removed from its original location
+ * further down in this patch).
+ */
+static struct {
+ cpumask_var_t idle_cpus_mask;
+ atomic_t nr_cpus;
+ unsigned long next_balance; /* in jiffy units */
+ unsigned long next_update; /* in jiffy units */
+} nohz ____cacheline_aligned;
+#endif
+
/**
* update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @env: The load balancing environment.
if (child && child->flags & SD_PREFER_SIBLING)
prefer_sibling = 1;
+#ifdef CONFIG_NO_HZ_COMMON
+	if (env->idle == CPU_NEWLY_IDLE) {
+		int cpu;
+
+		/* Update the stats of NOHZ idle CPUs in the sd */
+		for_each_cpu_and(cpu, sched_domain_span(env->sd),
+				 nohz.idle_cpus_mask) {
+			struct rq *rq = cpu_rq(cpu);
+
+			/* ... Unless we've already done so since the last tick */
+			if (time_after(jiffies,
+				       rq->last_blocked_load_update_tick))
+				update_blocked_averages(cpu);
+		}
+
+		/*
+		 * If we've just updated all of the NOHZ idle CPUs (i.e. the
+		 * sd spans every one of them), we can push back
+		 * nohz.next_update, which will prevent an unnecessary wakeup
+		 * for the nohz stats kick.
+		 *
+		 * This must only happen on the NEWLY_IDLE path: for other
+		 * balance types the loop above did not run, the blocked
+		 * loads were not refreshed, and deferring next_update would
+		 * leave them stale.
+		 */
+		if (cpumask_subset(nohz.idle_cpus_mask,
+				   sched_domain_span(env->sd)))
+			nohz.next_update = jiffies + LOAD_AVG_PERIOD;
+	}
+#endif
+
load_idx = get_sd_load_idx(env->sd, env->idle);
do {
* needed, they will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
*/
-static struct {
- cpumask_var_t idle_cpus_mask;
- atomic_t nr_cpus;
- unsigned long next_balance; /* in jiffy units */
- unsigned long next_update; /* in jiffy units */
-} nohz ____cacheline_aligned;
static inline int find_new_ilb(void)
{