sched: add per-task and per-rq BKL usage statistics.

The counters record how often a task is scheduled out while still holding
the BKL; the per-rq count is reported via /proc/sched_debug and the
per-task count via /proc/<pid>/sched, both only when CONFIG_SCHEDSTATS is
enabled.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
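
For reference (not part of the patch itself): the new counters reuse the
existing schedstat_inc() helper, which in the scheduler code of this era is
roughly the following, so both increments compile away entirely when
CONFIG_SCHEDSTATS is off:

	#ifdef CONFIG_SCHEDSTATS
	/* bump a statistics field in any struct reachable through a pointer */
	# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
	#else
	# define schedstat_inc(rq, field)	do { } while (0)
	#endif

The first argument only has to be a pointer to a struct containing the named
field, which is why the same macro works below for both the per-rq counter
and the per-task sched_info counter.
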
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
 			   last_queued;	/* when we were last queued to run */
+#ifdef CONFIG_SCHEDSTATS
+	/* BKL stats */
+	unsigned long bkl_cnt;
+#endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 	/* try_to_wake_up() stats */
 	unsigned long ttwu_cnt;
 	unsigned long ttwu_local;
+
+	/* BKL stats */
+	unsigned long bkl_cnt;
 #endif
 	struct lock_class_key rq_lock_key;
 };
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 	schedstat_inc(this_rq(), sched_cnt);
+#ifdef CONFIG_SCHEDSTATS
+	/* a non-negative lock_depth means prev is still holding the BKL */
+	if (unlikely(prev->lock_depth >= 0)) {
+		schedstat_inc(this_rq(), bkl_cnt);
+		schedstat_inc(prev, sched_info.bkl_cnt);
+	}
+#endif
 }
 /*
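
The lock_depth test works because task_struct.lock_depth stays at -1 while a
task does not hold the BKL and counts recursive acquisitions otherwise; the
lib/kernel_lock.c bookkeeping is roughly the following (simplified sketch,
not part of this patch):

	void __lockfunc lock_kernel(void)
	{
		int depth = current->lock_depth + 1;

		if (likely(!depth))		/* outermost acquisition */
			__lock_kernel();
		current->lock_depth = depth;
	}

	void __lockfunc unlock_kernel(void)
	{
		BUG_ON(current->lock_depth < 0);
		if (likely(--current->lock_depth < 0))
			__unlock_kernel();
	}

So a hit on the branch above means prev is being switched away while still
holding the BKL, which is exactly the event both counters are meant to record.
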
 			SPLIT_NS(spread0));
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
+#ifdef CONFIG_SCHEDSTATS
+	/* rq->bkl_cnt only exists when schedstats are configured in */
+	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_cnt",
+			rq->bkl_cnt);
+#endif
 }
 static void print_cpu(struct seq_file *m, int cpu)
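
With CONFIG_SCHEDSTATS=y this adds a "bkl_cnt" line to each cfs_rq block of
/proc/sched_debug, e.g. (value and exact spacing illustrative only):

	  .bkl_cnt                       : 42

The matching per-task count appears as "sched_info.bkl_cnt" in
/proc/<pid>/sched; like the other schedstats fields there, it is cleared
again by the proc_sched_set_task() hunk below when that file is written to.
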
 	PN(se.exec_max);
 	PN(se.slice_max);
 	PN(se.wait_max);
+	P(sched_info.bkl_cnt);
 #endif
 	SEQ_printf(m, "%-25s:%20Ld\n",
 		"nr_switches", (long long)(p->nvcsw + p->nivcsw));
 	p->se.exec_max			= 0;
 	p->se.slice_max			= 0;
 	p->se.wait_max			= 0;
+	p->sched_info.bkl_cnt		= 0;
 #endif
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;