--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ ... @@ update_cfs_rq_blocked_load()
if (!decays && !force_update)
return;
- if (atomic64_read(&cfs_rq->removed_load)) {
- u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
+ if (atomic_long_read(&cfs_rq->removed_load)) {
+ unsigned long removed_load;
+ removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
subtract_blocked_load_contrib(cfs_rq, removed_load);
}
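
This first hunk is the consumer side: a cheap atomic read skips the common case where nothing was removed, and the xchg claims the whole accumulated value while resetting it to zero in one step, so nothing a concurrent producer adds can be lost. Below is a minimal userspace sketch of the same drain pattern, using C11 stdatomic in place of the kernel's atomic_long_* API; the struct and function names are illustrative, not from the patch.

#include <stdatomic.h>

struct blocked_load {
	unsigned long blocked_load_avg;	/* private to the consumer */
	atomic_long removed_load;	/* producers add, consumer drains */
};

static void drain_removed_load(struct blocked_load *bl)
{
	/* Cheap read first: skip the xchg in the common empty case. */
	if (atomic_load(&bl->removed_load)) {
		/* Claim the accumulated value and zero it in one atomic
		 * step, so concurrent adders are never lost. */
		long removed = atomic_exchange(&bl->removed_load, 0);

		/* Clamp at zero, as subtract_blocked_load_contrib()
		 * does in the kernel. */
		if ((unsigned long)removed < bl->blocked_load_avg)
			bl->blocked_load_avg -= removed;
		else
			bl->blocked_load_avg = 0;
	}
}
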
@@ ... @@ migrate_task_rq_fair()
*/
if (se->avg.decay_count) {
se->avg.decay_count = -__synchronize_entity_decay(se);
- atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
+ atomic_long_add(se->avg.load_avg_contrib,
+ &cfs_rq->removed_load);
}
}
#endif /* CONFIG_SMP */
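
This hunk is the matching producer: a task migrating away may not hold the old runqueue's lock, so its load_avg_contrib is published into removed_load with an atomic add instead of being subtracted from blocked_load_avg directly. Continuing the sketch above (same illustrative names):

/* Producer: publish a departing entity's contribution without holding
 * any lock; pairs with the atomic_exchange() in drain_removed_load(). */
static void publish_removed_load(struct blocked_load *bl,
				 unsigned long load_contrib)
{
	atomic_fetch_add(&bl->removed_load, (long)load_contrib);
}
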
@@ ... @@ init_cfs_rq()
#endif
#ifdef CONFIG_SMP
atomic64_set(&cfs_rq->decay_counter, 1);
- atomic64_set(&cfs_rq->removed_load, 0);
+ atomic_long_set(&cfs_rq->removed_load, 0);
#endif
}
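
init_cfs_rq() just switches to the matching initializer, atomic_long_set(). A small usage example tying the two sketched helpers together (values made up for illustration):

#include <assert.h>

int main(void)
{
	struct blocked_load bl = { .blocked_load_avg = 2048 };

	/* Analogue of atomic_long_set(&cfs_rq->removed_load, 0). */
	atomic_init(&bl.removed_load, 0);

	publish_removed_load(&bl, 1024);	/* task migrates away */
	drain_removed_load(&bl);		/* next update folds it out */
	assert(bl.blocked_load_avg == 1024);
	return 0;
}
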
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ ... @@ struct cfs_rq {
* the FAIR_GROUP_SCHED case).
*/
unsigned long runnable_load_avg, blocked_load_avg;
- atomic64_t decay_counter, removed_load;
+ atomic64_t decay_counter;
u64 last_decay;
+ atomic_long_t removed_load;
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Required to track per-cpu representation of a task_group */
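
The sched.h hunk shows why the narrower type is safe: removed_load only ever accumulates load_avg_contrib values, which are unsigned long, so a word-sized atomic is wide enough, while decay_counter tracks a 64-bit decay count and has to stay atomic64_t; that is what splits the old combined declaration. On 32-bit builds the payoff is avoiding the atomic64 fallback paths (cmpxchg8b loops or spinlock-backed helpers, depending on the architecture). A rough mock of the resulting layout on an ILP32 target, illustrative only:

/* Mock of the two atomic widths on a 32-bit (ILP32) build; these are
 * stand-in types, not the kernel's definitions. */
typedef struct { long long counter; } mock_atomic64_t;	 /* 8 bytes; ops may
							    need cmpxchg8b or
							    a spinlock */
typedef struct { long counter; } mock_atomic_long_t;	 /* 4 bytes; plain
							    native atomics */

struct cfs_rq_tail {
	mock_atomic64_t decay_counter;	 /* genuinely 64-bit state */
	unsigned long long last_decay;
	mock_atomic_long_t removed_load; /* sums of unsigned long values */
};
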