void watchdog_hardlockup_touch_cpu(unsigned int cpu)
{
per_cpu(watchdog_hardlockup_touched, cpu) = true;
-
- /* Match with smp_rmb() in watchdog_hardlockup_check() */
- smp_wmb();
}
static bool is_hardlockup(unsigned int cpu)
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
{
- /* Match with smp_wmb() in watchdog_hardlockup_touch_cpu() */
- smp_rmb();
-
if (per_cpu(watchdog_hardlockup_touched, cpu)) {
per_cpu(watchdog_hardlockup_touched, cpu) = false;
return;
if (next_cpu < nr_cpu_ids)
watchdog_hardlockup_touch_cpu(next_cpu);
+ /*
+ * Make sure that the watchdog is touched on this CPU before
+ * other CPUs can see it in watchdog_cpus. The counterpart
+ * is in watchdog_buddy_check_hardlockup().
+ */
+ smp_wmb();
+
cpumask_set_cpu(cpu, &watchdog_cpus);
}
if (next_cpu < nr_cpu_ids)
watchdog_hardlockup_touch_cpu(next_cpu);
+ /*
+ * Make sure that the watchdog is touched on the next CPU before
+ * this CPU disappears from watchdog_cpus. The counterpart is in
+ * watchdog_buddy_check_hardlockup().
+ */
+ smp_wmb();
+
cpumask_clear_cpu(cpu, &watchdog_cpus);
}
if (next_cpu >= nr_cpu_ids)
return;
+ /*
+ * Make sure that the watchdog was touched on the next CPU when
+ * watchdog_next_cpu() returned another one because of
+ * a change in watchdog_hardlockup_enable()/disable().
+ */
+ smp_rmb();
+
watchdog_hardlockup_check(next_cpu, NULL);
}