// SPDX-License-Identifier: GPL-2.0-only
/*
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

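/*
 * Illustrative usage sketch (not part of the original file): the same
 * statement can feed a seq_file reader or, when called with a NULL
 * seq_file (e.g. from sysrq), the console via pr_cont():
 *
 *	SEQ_printf(m, "cpu#%d\n", cpu);		// seq_file output
 *	SEQ_printf(NULL, "cpu#%d\n", cpu);	// console output
 */
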
/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

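/*
 * Worked example (illustrative): paired with a "%Ld.%06ld" format,
 * SPLIT_NS() renders a nanosecond count with a millisecond integer part,
 * so 1234567890 ns prints as "1234.567890":
 *
 *	SEQ_printf(m, "%Ld.%06ld\n", SPLIT_NS(1234567890ULL));
 */
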
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

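/*
 * Illustrative usage sketch (assuming debugfs is mounted at
 * /sys/kernel/debug): a feature is set by writing its name, and cleared by
 * writing the name with the "NO_" prefix, e.g.:
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 *	echo GENTLE_FAIR_SLEEPERS    > /sys/kernel/debug/sched/features
 */
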
static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	/* kstrtouint() expects a NUL-terminated string: */
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
		return -EINVAL;

	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char * preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* CONFIG_PREEMPT_DYNAMIC */

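/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug): reading the
 * file marks the current mode with parentheses, writing switches it at
 * runtime:
 *
 *	# cat /sys/kernel/debug/sched/preempt
 *	none (voluntary) full
 *	# echo full > /sys/kernel/debug/sched/preempt
 */
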
__read_mostly bool sched_debug_verbose;

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);

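/*
 * Resulting layout, sketched from the calls above (which entries exist
 * depends on the kernel configuration):
 *
 *	/sys/kernel/debug/sched/
 *		features, verbose, preempt (CONFIG_PREEMPT_DYNAMIC)
 *		latency_ns, min_granularity_ns, wakeup_granularity_ns
 *		latency_warn_ms, latency_warn_once
 *		tunable_scaling, migration_cost_ns, nr_migrate (CONFIG_SMP)
 *		numa_balancing/scan_* (CONFIG_NUMA_BALANCING)
 *		domains/cpu*/domain* (CONFIG_SMP)
 *		debug
 */
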
#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;
static struct dentry		*sd_dentry;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64, 0644, max_newidle_lb_cost);
	SDM(u32, 0644, busy_factor);
	SDM(u32, 0644, imbalance_pct);
	SDM(u32, 0644, cache_nice_tries);
	SDM(str, 0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
}

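/*
 * Illustrative result (paths assume debugfs at /sys/kernel/debug): each
 * domain directory gets one file per SDM() entry plus "flags", e.g.:
 *
 *	/sys/kernel/debug/sched/domains/cpu0/domain0/min_interval
 *	/sys/kernel/debug/sched/domains/cpu0/domain0/flags
 *
 * where "flags" decodes the SD_* bits by name via sd_flags_fops above.
 */
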
void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_debug_init() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry)
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_remove(debugfs_lookup(buf, sd_dentry));
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

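/*
 * Worked example of the offset mapping (illustrative; assumes CPUs 0 and 2
 * online, CPU 1 offline):
 *
 *	*offset == 0  ->  (void *)1	header
 *	*offset == 1  ->  (void *)2	CPU 0 (cpumask_first)
 *	*offset == 2  ->  (void *)4	CPU 2 (cpumask_next skips offline CPU 1)
 *	beyond last   ->  NULL		end of the sequence
 */
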
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}