kernel/watchdog.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif

unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a hard lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

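/*
 * Parse the nmi_watchdog= boot parameter: "panic"/"nopanic" control
 * whether a detected hard lockup panics the machine, while "0"/"1"
 * disable/enable the NMI watchdog itself, e.g. nmi_watchdog=panic or
 * nmi_watchdog=0 on the kernel command line.
 */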
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented by an arch to start and
 * stop its hardlockup detector when the softlockup watchdog threads start
 * and stop. Such an arch must select the SOFTLOCKUP_DETECTOR Kconfig option.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_threads_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. The two
 * thresholds are therefore coupled by a factor: the soft-lockup threshold is
 * twice the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

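/*
 * With the default watchdog_thresh of 10s, get_softlockup_thresh()
 * returns 20s and set_sample_period() below yields
 * 20 * (NSEC_PER_SEC / 5) = 4 * NSEC_PER_SEC, i.e. the per-cpu hrtimer
 * fires every 4 seconds and gets two or three chances to increment
 * hrtimer_interrupts within the 10s hard-lockup window.
 */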
static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns. The divide by 5
	 * gives the hrtimer several chances (two or three with the
	 * current relation between the soft and hard thresholds) to
	 * increment before the hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state.  This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_mask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

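/*
 * Compare the current timestamp against the last time the per-cpu
 * watchdog thread touched its timestamp. Returns the stall duration in
 * seconds once it exceeds the soft-lockup threshold, 0 otherwise: with
 * the default watchdog_thresh of 10s, a CPU whose timestamp is 25s old
 * yields a duration of 25.
 */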
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
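/*
 * Called from the hard lockup detector (typically the perf NMI
 * callback). The hrtimer increments hrtimer_interrupts on every tick;
 * if the count has not moved since the last check, the hrtimer has
 * been starved for a full sample window and the CPU is considered
 * hard locked up.
 */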
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

/* watchdog kicker functions */
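/*
 * The per-cpu hrtimer callback. It increments hrtimer_interrupts as a
 * liveness signal for the hard lockup detector, wakes the per-cpu
 * watchdog thread so it can refresh watchdog_touch_ts, and warns if
 * that timestamp is older than the soft-lockup threshold.
 */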
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/*
	 * Check for a softlockup: make sure the high priority watchdog
	 * task is being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't, some task is
	 * hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn)) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces.
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/*
			 * Avoid generating two back traces for current
			 * given that one is already made above.
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else {
		__this_cpu_write(soft_watchdog_warn, false);
	}

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first. That prevents a false positive
	 * from the perf NMI when there is a large delay between disabling
	 * the timer and disabling the perf event.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by default) to
 * reset the softlockup timestamp. If this gets delayed for more than
 * 2*watchdog_thresh seconds then the debug-printout triggers in
 * watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

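/*
 * Per-cpu thread infrastructure: smpboot creates one "watchdog/%u"
 * thread per cpu. ->setup/->unpark start the hrtimer and perf event on
 * a cpu and ->park/->cleanup stop them, so parking and unparking the
 * threads via the allowed cpumask effectively switches the detectors
 * on and off per cpu.
 */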
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};

static void softlockup_update_smpboot_threads(void)
{
	lockdep_assert_held(&watchdog_mutex);

	if (!softlockup_threads_initialized)
		return;

	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
					     &watchdog_allowed_mask);
}

/* Temporarily park all watchdog threads */
static void softlockup_park_all_threads(void)
{
	cpumask_clear(&watchdog_allowed_mask);
	softlockup_update_smpboot_threads();
}

/* Unpark enabled threads */
static void softlockup_unpark_threads(void)
{
	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	softlockup_update_smpboot_threads();
}

static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	softlockup_park_all_threads();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_unpark_threads();
	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty.  When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	int ret;

	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
						     &watchdog_allowed_mask);
	if (ret) {
		pr_err("Failed to initialize soft lockup detector threads\n");
		return;
	}

	mutex_lock(&watchdog_mutex);
	softlockup_threads_initialized = true;
	lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline int watchdog_park_threads(void) { return 0; }
static inline void watchdog_unpark_threads(void) { }
static inline int watchdog_enable_all_cpus(void) { return 0; }
static inline void watchdog_disable_all_cpus(void) { }
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
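
/*
 * For example, "echo 0 > /proc/sys/kernel/watchdog" disables both the
 * hard and soft lockup detectors, while the nmi_watchdog and
 * soft_watchdog knobs below toggle each one individually.
 */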

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}