char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */
+#ifdef CONFIG_CGROUPS
/*
- * Basic resource stats.
+ * cgroup scalable recursive statistics.
*/
-#ifdef CONFIG_CGROUPS
+void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
+void cgroup_rstat_flush(struct cgroup *cgrp);
+void cgroup_rstat_flush_hold(struct cgroup *cgrp);
+void cgroup_rstat_flush_release(void);
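(For context, a minimal sketch of how a controller might drive this interface; my_stat_account(), my_stat_read() and my_stat_total() are hypothetical illustrations, not part of this patch. Updaters take the cheap per-cpu path via cgroup_rstat_updated(); readers flush the subtree before reporting:)

	/* hot path: record an event, then mark this cgroup's rstat_cpu dirty */
	static void my_stat_account(struct cgroup *cgrp, u64 delta)
	{
		int cpu = get_cpu();	/* disables preemption */

		/* update some per-cpu counter here (hypothetical) */
		cgroup_rstat_updated(cgrp, cpu);
		put_cpu();
	}

	/* read path: aggregate the whole subtree before reporting */
	static u64 my_stat_read(struct cgroup *cgrp)
	{
		cgroup_rstat_flush(cgrp);
		return my_stat_total(cgrp);	/* hypothetical aggregate */
	}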
+/*
+ * Basic resource stats.
+ */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
}
/**
- * cgroup_rstat_cpu_updated - keep track of updated rstat_cpu
+ * cgroup_rstat_updated - keep track of updated rstat_cpu
* @cgrp: target cgroup
* @cpu: cpu on which rstat_cpu was updated
*
* rstat_cpu->updated_children list. See the comment on top of
* cgroup_rstat_cpu definition for details.
*/
-static void cgroup_rstat_cpu_updated(struct cgroup *cgrp, int cpu)
+void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
{
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
struct cgroup *parent;
raw_spin_unlock_irqrestore(cpu_lock, flags);
}
+EXPORT_SYMBOL_GPL(cgroup_rstat_updated);
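(The body elided above follows the per-cpu raw spinlock pattern visible in the surrounding lines; a sketch, assuming nothing beyond what this hunk shows:)

	unsigned long flags;

	raw_spin_lock_irqsave(cpu_lock, flags);
	/* walk @cgrp's ancestors, linking onto updated_children lists */
	raw_spin_unlock_irqrestore(cpu_lock, flags);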
/**
* cgroup_rstat_cpu_pop_updated - iterate and dismantle rstat_cpu updated tree
mutex_unlock(&cgroup_rstat_mutex);
}
+/**
+ * cgroup_rstat_flush_hold - flush stats in @cgrp's subtree and hold
+ * @cgrp: target cgroup
+ *
+ * Flush stats in @cgrp's subtree and prevent further flushes. Must be
+ * paired with cgroup_rstat_flush_release().
+ */
+void cgroup_rstat_flush_hold(struct cgroup *cgrp)
+ __acquires(&cgroup_rstat_mutex)
+{
+ mutex_lock(&cgroup_rstat_mutex);
+ cgroup_rstat_flush_locked(cgrp);
+}
+
+/**
+ * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
+ */
+void cgroup_rstat_flush_release(void)
+ __releases(&cgroup_rstat_mutex)
+{
+ mutex_unlock(&cgroup_rstat_mutex);
+}
+
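(A sketch of the intended pairing, assuming a caller that needs several aggregated fields to stay consistent across reads; cgroup_base_stat_cputime_show below uses exactly this shape:)

	cgroup_rstat_flush_hold(cgrp);	/* flush subtree, keep the mutex */
	usage = cgrp->bstat.cputime.sum_exec_runtime;
	/* ... read other flushed fields while they cannot change ... */
	cgroup_rstat_flush_release();	/* drop the mutex */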
int cgroup_rstat_init(struct cgroup *cgrp)
{
int cpu;
struct cgroup_base_stat delta;
unsigned seq;
- lockdep_assert_held(&cgroup_rstat_mutex);
-
/* fetch the current per-cpu values */
do {
seq = __u64_stats_fetch_begin(&rstatc->bsync);
cputime = rstatc->bstat.cputime;
} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
- /* accumulate the deltas to propgate */
+ /* calculate the delta to propagate */
delta.cputime.utime = cputime.utime - last_cputime->utime;
delta.cputime.stime = cputime.stime - last_cputime->stime;
delta.cputime.sum_exec_runtime = cputime.sum_exec_runtime -
struct cgroup_rstat_cpu *rstatc)
{
u64_stats_update_end(&rstatc->bsync);
- cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
+ cgroup_rstat_updated(cgrp, smp_processor_id());
put_cpu_ptr(rstatc);
}
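(A sketch of the write-side pairing this helper closes, assuming a matching cgroup_base_stat_cputime_account_begin() that does get_cpu_ptr() plus u64_stats_update_begin(); the begin half is not shown in this hunk:)

	rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
	rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
	cgroup_base_stat_cputime_account_end(cgrp, rstatc);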
if (!cgroup_parent(cgrp))
return;
- mutex_lock(&cgroup_rstat_mutex);
-
- cgroup_rstat_flush_locked(cgrp);
-
+ cgroup_rstat_flush_hold(cgrp);
usage = cgrp->bstat.cputime.sum_exec_runtime;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
-
- mutex_unlock(&cgroup_rstat_mutex);
+ cgroup_rstat_flush_release();
do_div(usage, NSEC_PER_USEC);
do_div(utime, NSEC_PER_USEC);
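(do_div() divides a u64 in place and evaluates to the remainder, which is why the conversions above overwrite usage/utime rather than assigning a quotient; a standalone sketch:)

	u64 ns = 1500000;			/* 1.5ms in nanoseconds */
	u32 rem = do_div(ns, NSEC_PER_USEC);	/* ns == 1500, rem == 0 */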