// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI NMI support routines
 *
 * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
 * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) Mike Travis
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/kdb.h>
#include <linux/kexec.h>
#include <linux/kgdb.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/clocksource.h>
#include <asm/apic.h>
#include <asm/current.h>
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>
/*
 * Handle system-wide NMI events generated by the global 'power nmi' command.
 *
 * Basic operation is to field the NMI interrupt on each CPU and wait
 * until all CPUs have arrived into the nmi handler. If some CPUs do not
 * make it into the handler, try and force them in with the IPI(NMI) signal.
 *
 * We also have to lessen UV Hub MMR accesses as much as possible as this
 * disrupts the UV Hub's primary mission of directing NumaLink traffic and
 * can cause system problems to occur.
 *
 * To do this we register our primary NMI notifier on the NMI_UNKNOWN
 * chain. This reduces the number of false NMI calls when the perf
 * tools are running, which generate an enormous number of NMIs per
 * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
 * very short as it only checks whether it has been "pinged" with the
 * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
 */
static struct uv_hub_nmi_s **uv_hub_nmi_list;

DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
/* Newer SMM NMI handler, not present in all systems */
static unsigned long uvh_nmi_mmrx;		/* UVH_EVENT_OCCURRED0/1 */
static unsigned long uvh_nmi_mmrx_clear;	/* UVH_EVENT_OCCURRED0/1_ALIAS */
static int uvh_nmi_mmrx_shift;			/* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
static char *uvh_nmi_mmrx_type;			/* "EXTIO_INT0" */

/* Non-zero indicates newer SMM NMI handler present */
static unsigned long uvh_nmi_mmrx_supported;	/* UVH_EXTIO_INT0_BROADCAST */

/* Indicates to BIOS that we want to use the newer SMM NMI handler */
static unsigned long uvh_nmi_mmrx_req;		/* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
static int uvh_nmi_mmrx_req_shift;		/* 62 */
/* UV hubless values */
#define NMI_CONTROL_PORT	0x70
#define NMI_DUMMY_PORT		0x71
#define PAD_OWN_GPP_D_0		0x2c
#define GPI_NMI_STS_GPP_D_0	0x164
#define GPI_NMI_ENA_GPP_D_0	0x174
#define STS_GPP_D_0_MASK	0x1
#define PAD_CFG_DW0_GPP_D_0	0x4c0
#define GPIROUTNMI		(1ul << 17)
#define PCH_PCR_GPIO_1_BASE	0xfdae0000ul
#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))

static u64 *pch_base;
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
static unsigned long nmi_mmr_pending;

static atomic_t	uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;
/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR	0
#define SLAVE_CONTINUE	1
#define SLAVE_EXIT	2
/*
 * Default is all stack dumps go to the console and buffer.
 * Lower level to send to log buffer only.
 */
static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
/*
 * The following values show statistics on how perf events are affecting
 * this code.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}

static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* Clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64,
	.set = param_set_local64,
};
#define param_check_local64(name, p) __param_check(name, p, local64_t)
static local64_t uv_nmi_count;
module_param_named(nmi_count, uv_nmi_count, local64, 0644);

static local64_t uv_nmi_misses;
module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);

static local64_t uv_nmi_ping_count;
module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);

static local64_t uv_nmi_ping_misses;
module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
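
/*
 * These counters are readable and resettable through sysfs; the paths
 * below assume this file is built into the kernel, so the parameters
 * appear under the "uv_nmi" module name:
 *
 *   # cat /sys/module/uv_nmi/parameters/nmi_count
 *   # echo 0 > /sys/module/uv_nmi/parameters/nmi_count	(any write clears)
 */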
/*
 * Following values allow tuning for large systems under heavy loading
 */
static int uv_nmi_initial_delay = 100;
module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);

static int uv_nmi_slave_delay = 100;
module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);

static int uv_nmi_loop_delay = 100;
module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);

static int uv_nmi_trigger_delay = 10000;
module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);

static int uv_nmi_wait_count = 100;
module_param_named(wait_count, uv_nmi_wait_count, int, 0644);

static int uv_nmi_retry_count = 500;
module_param_named(retry_count, uv_nmi_retry_count, int, 0644);

static bool uv_pch_intr_enable = true;
static bool uv_pch_intr_now_enabled;
module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);

static bool uv_pch_init_enable = true;
module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);

static int uv_nmi_debug;
module_param_named(debug, uv_nmi_debug, int, 0644);
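
/*
 * Note: initial_delay, slave_delay and loop_delay are passed to udelay()
 * and are therefore in microseconds; trigger_delay is the retry count of
 * a 10 usec poll loop in uv_nmi_trigger_dump() below.
 */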
#define nmi_debug(fmt, ...)				\
	do {						\
		if (uv_nmi_debug)			\
			pr_info(fmt, ##__VA_ARGS__);	\
	} while (0)
/* Valid NMI Actions */
#define	ACTION_LEN	16
static struct nmi_action {
	char	*action;
	char	*desc;
} valid_acts[] = {
	{	"kdump",	"do kernel crash dump" },
	{	"dump",		"dump process stack for each cpu" },
	{	"ips",		"dump Inst Ptr info for each cpu" },
	{	"kdb",		"enter KDB (needs kgdboc= assignment)" },
	{	"kgdb",		"enter KGDB (needs gdb target remote)" },
	{	"health",	"check if CPUs respond to NMI" },
};
typedef char action_t[ACTION_LEN];
static action_t uv_nmi_action = { "dump" };
static int param_get_action(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", uv_nmi_action);
}

static int param_set_action(const char *val, const struct kernel_param *kp)
{
	int i;
	int n = ARRAY_SIZE(valid_acts);
	char arg[ACTION_LEN], *p;

	/* (remove possible '\n') */
	strncpy(arg, val, ACTION_LEN - 1);
	arg[ACTION_LEN - 1] = '\0';
	p = strchr(arg, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < n; i++)
		if (!strcmp(arg, valid_acts[i].action))
			break;

	if (i < n) {
		strcpy(uv_nmi_action, arg);
		pr_info("UV: New NMI action:%s\n", uv_nmi_action);
		return 0;
	}

	pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
	for (i = 0; i < n; i++)
		pr_err("UV: %-8s - %s\n",
			valid_acts[i].action, valid_acts[i].desc);

	return -EINVAL;
}
static const struct kernel_param_ops param_ops_action = {
	.get = param_get_action,
	.set = param_set_action,
};
#define param_check_action(name, p) __param_check(name, p, action_t)

module_param_named(action, uv_nmi_action, action, 0644);
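
/*
 * Example of selecting an action at run time (path assumes a built-in
 * "uv_nmi" module name):
 *
 *   # echo kdb > /sys/module/uv_nmi/parameters/action
 *
 * An invalid string is rejected and the valid actions are listed in the
 * kernel log by param_set_action() above.
 */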
static inline bool uv_nmi_action_is(const char *action)
{
	return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
}
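
/*
 * Note: uv_nmi_action_is() is a prefix match over strlen(action)
 * characters, so it returns true whenever uv_nmi_action starts with
 * the given string.
 */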
/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
	/* First determine arch specific MMRs to handshake with BIOS */
	if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) {
		uvh_nmi_mmrx = UVH_EVENT_OCCURRED0;
		uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS;
		uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT;
		uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";

		uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
		uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
		uvh_nmi_mmrx_req_shift = 62;

	} else if (UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK) {
		uvh_nmi_mmrx = UVH_EVENT_OCCURRED1;
		uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED1_ALIAS;
		uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT;
		uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0";

		uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
		uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
		uvh_nmi_mmrx_req_shift = 62;

	} else {
		pr_err("UV:%s:cannot find EVENT_OCCURRED*_EXTIO_INT0\n",
			__func__);
		return;
	}

	/* Then find out if new NMI is supported */
	if (likely(uv_read_local_mmr(uvh_nmi_mmrx_supported))) {
		uv_write_local_mmr(uvh_nmi_mmrx_req,
					1UL << uvh_nmi_mmrx_req_shift);
		nmi_mmr = uvh_nmi_mmrx;
		nmi_mmr_clear = uvh_nmi_mmrx_clear;
		nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
		pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}

static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}
/*
 * UV hubless NMI handler functions
 */
static inline void uv_reassert_nmi(void)
{
	/* (from arch/x86/include/asm/mach_traps.h) */
	outb(0x8f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT); /* dummy read */
	outb(0x0f, NMI_CONTROL_PORT);
	inb(NMI_DUMMY_PORT); /* dummy read */
}
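
/*
 * Bit 7 of port 0x70 masks NMI on PC-compatible hardware: writing 0x8f
 * masks NMI (while selecting CMOS register 0x0f), and writing 0x0f
 * unmasks it again, re-asserting a still-pending NMI.  The dummy reads
 * of port 0x71 complete each RTC index/data cycle.
 */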
static void uv_init_hubless_pch_io(int offset, int mask, int data)
{
	int *addr = PCH_PCR_GPIO_ADDRESS(offset);
	int readd = readl(addr);

	if (mask) {			/* OR in new data */
		int writed = (readd & ~mask) | data;

		nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
			addr, readd, ~mask, data, writed);
		writel(writed, addr);
	} else if (readd & data) {	/* clear status bit */
		nmi_debug("UV:PCH: %p = %x\n", addr, data);
		writel(data, addr);
	}

	(void)readl(addr);		/* flush write data */
}
static void uv_nmi_setup_hubless_intr(void)
{
	uv_pch_intr_now_enabled = uv_pch_intr_enable;

	uv_init_hubless_pch_io(
		PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
		uv_pch_intr_now_enabled ? GPIROUTNMI : 0);

	nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
		uv_pch_intr_now_enabled ? "enabled" : "disabled");
}
static struct init_nmi {
	unsigned int	offset;
	unsigned int	mask;
	unsigned int	data;
} init_nmi[] = {
	{ .offset = 0x84,  .mask = 0x1, .data = 0x0 },	/* HOSTSW_OWN_GPP_D_0: ACPI Mode */

/* Clear status: */
	{ .offset = 0x104, .mask = 0x0, .data = 0x1 },	/* GPI_INT_STS_GPP_D_0 */
	{ .offset = 0x124, .mask = 0x0, .data = 0x1 },	/* GPI_GPE_STS_GPP_D_0 */
	{ .offset = 0x144, .mask = 0x0, .data = 0x1 },	/* GPI_SMI_STS_GPP_D_0 */
	{ .offset = 0x164, .mask = 0x0, .data = 0x1 },	/* GPI_NMI_STS_GPP_D_0 */

/* Disable interrupt generation: */
	{ .offset = 0x114, .mask = 0x0, .data = 0x0 },	/* GPI_INT_EN_GPP_D_0 */
	{ .offset = 0x134, .mask = 0x0, .data = 0x0 },	/* GPI_GPE_EN_GPP_D_0 */
	{ .offset = 0x154, .mask = 0x0, .data = 0x0 },	/* GPI_SMI_EN_GPP_D_0 */
	{ .offset = 0x174, .mask = 0x0, .data = 0x0 },	/* GPI_NMI_EN_GPP_D_0 */

/* Setup GPP_D_0 Pad Config: */
	{	/* PAD_CFG_DW0_GPP_D_0 */
	.offset = 0x4c0,
	.mask = 0xffffffff,
	.data = 0x82020100,
/*
 *  31:30 Pad Reset Config (PADRSTCFG): = 2h # PLTRST# (default)
 *  29    RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
 *                                                from RX buffer (default)
 *  28    RX Raw Override to '1' (RXRAW1): = 0 # No Override
 *  26:25 RX Level/Edge Configuration (RXEVCFG):
 *      = 0h # Level
 *      = 1h # Edge
 *  23    RX Invert (RXINV): = 0 # No Inversion (signal active high)
 *  20    GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
 *      = 0 # Routing does not cause peripheral IRQ...
 *          # (we want an NMI not an IRQ)
 *  19    GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
 *  18    GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
 *  17    GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
 *  11:10 Pad Mode (PMODE1/0): = 0h # GPIO controls the Pad.
 *   9    GPIO RX Disable (GPIORXDIS):
 *      = 0 # Enable the input buffer (active low enable)
 *   8    GPIO TX Disable (GPIOTXDIS):
 *      = 1 # Disable the output buffer; i.e. Hi-Z
 *   1    GPIO RX State (GPIORXSTATE): current internal RX pad state
 *   0    GPIO TX State (GPIOTXSTATE):
 *      = 0 # (Leave at default)
 */
	},

/* Pad Config DW1 */
	{	/* PAD_CFG_DW1_GPP_D_0 */
	.offset = 0x4c4,
	.mask = 0xffffffff,
	.data = 0,	/* Termination = none (default) */
	},
};
static void uv_init_hubless_pch_d0(void)
{
	int i, read;

	read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
	if (read != 0) {
		pr_info("UV: Hubless NMI already configured\n");
		return;
	}

	nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
	for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
		uv_init_hubless_pch_io(init_nmi[i].offset,
					init_nmi[i].mask,
					init_nmi[i].data);
	}
}
static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
{
	int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
	int status = *pstat;

	hub_nmi->nmi_value = status;
	atomic_inc(&hub_nmi->read_mmr_count);

	if (!(status & STS_GPP_D_0_MASK))	/* Not a UV external NMI */
		return 0;

	*pstat = STS_GPP_D_0_MASK;	/* Is a UV NMI: clear GPP_D_0 status */
	(void)*pstat;			/* Flush write */

	return 1;
}
static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	if (hub_nmi->hub_present)
		return uv_nmi_test_mmr(hub_nmi);

	if (hub_nmi->pch_owner)		/* Only PCH owner can check status */
		return uv_nmi_test_hubless(hub_nmi);

	return -1;
}
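
/*
 * A negative return from uv_test_nmi() means this CPU cannot check the
 * NMI status itself (a non-PCH node in a hubless system); the caller,
 * uv_check_nmi(), treats that as "wait for the owner to set in_nmi".
 */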
/*
 * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
 * return true.  If first CPU in on the system, set global "in_nmi" flag.
 */
static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
{
	int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);

	if (first) {
		atomic_set(&hub_nmi->cpu_owner, cpu);
		if (atomic_add_unless(&uv_in_nmi, 1, 1))
			atomic_set(&uv_nmi_cpu, cpu);

		atomic_inc(&hub_nmi->nmi_count);
	}
	return first;
}
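
/*
 * The atomic_add_unless(v, 1, 1) calls above increment only when the
 * counter is not already 1, so exactly one CPU per hub (and one CPU
 * system-wide) wins the "first in" race without needing a lock.
 */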
/* Check if this is a system NMI event */
static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
{
	int cpu = smp_processor_id();
	int nmi = 0;
	int nmi_detected = 0;

	local64_inc(&uv_nmi_count);
	this_cpu_inc(uv_cpu_nmi.queries);

	do {
		nmi = atomic_read(&hub_nmi->in_nmi);
		if (nmi)
			break;

		if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
			nmi_detected = uv_test_nmi(hub_nmi);

			/* Check flag for UV external NMI */
			if (nmi_detected > 0) {
				uv_set_in_nmi(cpu, hub_nmi);
				nmi = 1;
				break;
			}

			/* A non-PCH node in a hubless system waits for NMI */
			else if (nmi_detected < 0)
				goto slave_wait;

			/* MMR/PCH NMI flag is clear */
			raw_spin_unlock(&hub_nmi->nmi_lock);

		} else {
			/* Wait a moment for the HUB NMI locker to set flag */
slave_wait:		cpu_relax();
			udelay(uv_nmi_slave_delay);

			/* Re-check hub in_nmi flag */
			nmi = atomic_read(&hub_nmi->in_nmi);
			if (nmi)
				break;
		}

		/*
		 * Check if this BMC missed setting the MMR NMI flag (or)
		 * UV hubless system where only PCH owner can check flag
		 */
		if (!nmi) {
			nmi = atomic_read(&uv_in_nmi);
			if (nmi)
				uv_set_in_nmi(cpu, hub_nmi);
		}

		/* If we're holding the hub lock, release it now */
		if (nmi_detected < 0)
			raw_spin_unlock(&hub_nmi->nmi_lock);

	} while (0);

	if (!nmi)
		local64_inc(&uv_nmi_misses);

	return nmi;
}
/* Need to reset the NMI MMR register, but only once per hub. */
static inline void uv_clear_nmi(int cpu)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;

	if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
		atomic_set(&hub_nmi->cpu_owner, -1);
		atomic_set(&hub_nmi->in_nmi, 0);
		if (hub_nmi->hub_present)
			uv_local_mmr_clear_nmi();
		else
			uv_reassert_nmi();
		raw_spin_unlock(&hub_nmi->nmi_lock);
	}
}
/* Ping non-responding CPUs attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask)
		uv_cpu_nmi_per(cpu).pinging = 1;

	apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}
/* Clean up flags for CPUs that ignored both NMI and ping */
static void uv_nmi_cleanup_mask(void)
{
	int cpu;

	for_each_cpu(cpu, uv_nmi_cpu_mask) {
		uv_cpu_nmi_per(cpu).pinging = 0;
		uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
	}
}
/* Loop waiting as CPUs enter NMI handler */
static int uv_nmi_wait_cpus(int first)
{
	int i, j, k, n = num_online_cpus();
	int last_k = 0, waiting = 0;
	int cpu = smp_processor_id();

	if (first) {
		cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
		k = 0;
	} else {
		k = n - cpumask_weight(uv_nmi_cpu_mask);
	}

	/* PCH NMI causes only one CPU to respond */
	if (first && uv_pch_intr_now_enabled) {
		cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
		return n - k - 1;
	}

	udelay(uv_nmi_initial_delay);
	for (i = 0; i < uv_nmi_retry_count; i++) {
		int loop_delay = uv_nmi_loop_delay;

		for_each_cpu(j, uv_nmi_cpu_mask) {
			if (uv_cpu_nmi_per(j).state) {
				cpumask_clear_cpu(j, uv_nmi_cpu_mask);
				if (++k >= n)
					break;
			}
		}
		if (k >= n) {		/* all in? */
			k = n;
			break;
		}
		if (last_k != k) {	/* abort if no new CPU's coming in */
			last_k = k;
			waiting = 0;
		} else if (++waiting > uv_nmi_wait_count)
			break;

		/* Extend delay if waiting only for CPU 0: */
		if (waiting && (n - k) == 1 &&
		    cpumask_test_cpu(0, uv_nmi_cpu_mask))
			loop_delay *= 100;

		udelay(loop_delay);
	}
	atomic_set(&uv_nmi_cpus_in_nmi, k);
	return n - k;
}
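
/*
 * Note: uv_nmi_wait_cpus() returns the number of CPUs that have not yet
 * checked in; a return of zero means every CPU it was waiting for has
 * reached the handler.
 */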
/* Wait until all slave CPUs have entered UV NMI handler */
static void uv_nmi_wait(int master)
{
	/* Indicate this CPU is in: */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

	/* If not the first CPU in (the master), then we are a slave CPU */
	if (!master)
		return;

	do {
		/* Wait for all other CPUs to gather here */
		if (!uv_nmi_wait_cpus(1))
			break;

		/* If not all made it in, send IPI NMI to them */
		pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));

		uv_nmi_nr_cpus_ping();

		/* If all CPUs are in, then done */
		if (!uv_nmi_wait_cpus(0))
			break;

		pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
			 cpumask_weight(uv_nmi_cpu_mask),
			 cpumask_pr_args(uv_nmi_cpu_mask));
	} while (0);

	pr_alert("UV: %d of %d CPUs in NMI\n",
		atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
}
/* Dump Instruction Pointer header */
static void uv_nmi_dump_cpu_ip_hdr(void)
{
	pr_info("\nUV: %4s %6s %-32s %s   (Note: PID 0 not listed)\n",
		"CPU", "PID", "COMMAND", "IP");
}

/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
	pr_info("UV: %4d %6d %-32.32s %pS",
		cpu, current->pid, current->comm, (void *)regs->ip);
}
/*
 * Dump this CPU's state.  If action was set to "kdump" and the crash_kexec
 * failed, then we provide "dump" as an alternate action.  Action "dump" now
 * also includes the "ips" (instruction pointers) action, whereas the action
 * "ips" only displays instruction pointers for the non-idle CPUs.  This is
 * an abbreviated form of the "ps" command.
 */
static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
{
	const char *dots = " ................................. ";

	if (cpu == 0)
		uv_nmi_dump_cpu_ip_hdr();

	if (current->pid != 0 || !uv_nmi_action_is("ips"))
		uv_nmi_dump_cpu_ip(cpu, regs);

	if (uv_nmi_action_is("dump")) {
		pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
		show_regs(regs);
	}

	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}
/* Trigger a slave CPU to dump its state */
static void uv_nmi_trigger_dump(int cpu)
{
	int retry = uv_nmi_trigger_delay;

	if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
		return;

	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
	do {
		cpu_relax();
		udelay(10);
		if (uv_cpu_nmi_per(cpu).state
				!= UV_NMI_STATE_DUMP)
			return;
	} while (--retry > 0);

	pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
	uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}
/* Wait until all CPUs ready to exit */
static void uv_nmi_sync_exit(int master)
{
	atomic_dec(&uv_nmi_cpus_in_nmi);
	if (master) {
		while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
			cpu_relax();
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	} else {
		while (atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
}
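
/*
 * Exit handshake in uv_nmi_sync_exit(): the master spins until every
 * slave has decremented uv_nmi_cpus_in_nmi, then clears
 * uv_nmi_slave_continue, which in turn releases the slaves spinning in
 * the else branch above.
 */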
/* Current "health" check is to check which CPUs are responsive */
static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int in = atomic_read(&uv_nmi_cpus_in_nmi);
		int out = num_online_cpus() - in;

		pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
	}
	uv_nmi_sync_exit(master);
}
/* Walk through CPU list and dump state of each */
static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int tcpu;
		int ignored = 0;
		int saved_console_loglevel = console_loglevel;

		pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
			uv_nmi_action_is("ips") ? "IPs" : "processes",
			atomic_read(&uv_nmi_cpus_in_nmi), cpu);

		console_loglevel = uv_nmi_loglevel;
		atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		for_each_online_cpu(tcpu) {
			if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
				ignored++;
			else if (tcpu == cpu)
				uv_nmi_dump_state_cpu(tcpu, regs);
			else
				uv_nmi_trigger_dump(tcpu);
		}
		if (ignored)
			pr_alert("UV: %d CPUs ignored NMI\n", ignored);

		console_loglevel = saved_console_loglevel;
		pr_alert("UV: process trace complete\n");
	} else {
		while (!atomic_read(&uv_nmi_slave_continue))
			cpu_relax();
		while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
			cpu_relax();
		uv_nmi_dump_state_cpu(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}
static void uv_nmi_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
	touch_nmi_watchdog();
}
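
/*
 * CPUs can sit in the NMI handler far longer than the soft-lockup,
 * clocksource, RCU-stall and NMI watchdog thresholds allow, so all of
 * those watchdogs are reset above before normal execution resumes.
 */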
/* Defined outside the #if: also used by the !KEXEC stub and uv_handle_nmi() */
static atomic_t uv_nmi_kexec_failed;

#if defined(CONFIG_KEXEC_CORE)
static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{
	/* Check if kdump kernel loaded for both main and secondary CPUs */
	if (!kexec_crash_image) {
		if (main)
			pr_err("UV: NMI error: kdump kernel not loaded\n");
		return;
	}

	/* Call crash to dump system state */
	if (main) {
		pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
		crash_kexec(regs);

		pr_emerg("UV: crash_kexec unexpectedly returned\n");
		atomic_set(&uv_nmi_kexec_failed, 1);

	} else { /* secondary */

		/* If kdump kernel fails, secondaries will exit this loop */
		while (atomic_read(&uv_nmi_kexec_failed) == 0) {

			/* Once shootdown cpus starts, they do not return */
			run_crash_ipi_callback(regs);
			mdelay(10);
		}
	}
}

#else /* !CONFIG_KEXEC_CORE */
static inline void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{
	if (main)
		pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
	atomic_set(&uv_nmi_kexec_failed, 1);
}
#endif /* !CONFIG_KEXEC_CORE */
#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
static inline int uv_nmi_kdb_reason(void)
{
	return KDB_REASON_SYSTEM_NMI;
}
#else /* !CONFIG_KGDB_KDB */
static inline int uv_nmi_kdb_reason(void)
{
	/* Ensure user is expecting to attach gdb remote */
	if (uv_nmi_action_is("kgdb"))
		return 0;

	pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
	return -1;
}
#endif /* CONFIG_KGDB_KDB */
/*
 * Call KGDB/KDB from NMI handler
 *
 * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
 * 'kdb' has no effect on which is used.  See the KGDB documentation for
 * further information.
 */
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	if (master) {
		int reason = uv_nmi_kdb_reason();
		int ret;

		if (reason < 0)
			return;

		/* Call KGDB NMI handler as MASTER */
		ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
				&uv_nmi_slave_continue);
		if (ret) {
			pr_alert("KGDB returned error, is kgdboc set?\n");
			atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
		}
	} else {
		/* Wait for KGDB signal that it's ready for slaves to enter */
		int sig;

		do {
			cpu_relax();
			sig = atomic_read(&uv_nmi_slave_continue);
		} while (!sig);

		/* Call KGDB as slave */
		if (sig == SLAVE_CONTINUE)
			kgdb_nmicallback(cpu, regs);
	}
	uv_nmi_sync_exit(master);
}

#else /* !CONFIG_KGDB */
static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
	pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
#endif /* !CONFIG_KGDB */
/*
 * UV NMI handler
 */
static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
	struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
	int cpu = smp_processor_id();
	int master = 0;
	unsigned long flags;

	local_irq_save(flags);

	/* If not a UV System NMI, ignore */
	if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
		local_irq_restore(flags);
		return NMI_DONE;
	}

	/* Indicate we are the first CPU into the NMI handler */
	master = (atomic_read(&uv_nmi_cpu) == cpu);

	/* If NMI action is "kdump", then attempt to do it */
	if (uv_nmi_action_is("kdump")) {
		uv_nmi_kdump(cpu, master, regs);

		/* Unexpected return, revert action to "dump" */
		if (master)
			strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
	}

	/* Pause as all CPUs enter the NMI handler */
	uv_nmi_wait(master);

	/* Process actions other than "kdump": */
	if (uv_nmi_action_is("health")) {
		uv_nmi_action_health(cpu, regs, master);
	} else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
		uv_nmi_dump_state(cpu, regs, master);
	} else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
		uv_call_kgdb_kdb(cpu, regs, master);
	} else {
		if (master)
			pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
		uv_nmi_sync_exit(master);
	}

	/* Clear per_cpu "in_nmi" flag */
	this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);

	/* Clear MMR NMI flag on each hub */
	uv_clear_nmi(cpu);

	/* Clear global flags */
	if (master) {
		if (cpumask_weight(uv_nmi_cpu_mask))
			uv_nmi_cleanup_mask();
		atomic_set(&uv_nmi_cpus_in_nmi, -1);
		atomic_set(&uv_nmi_cpu, -1);
		atomic_set(&uv_in_nmi, 0);
		atomic_set(&uv_nmi_kexec_failed, 0);
		atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
	}

	uv_nmi_touch_watchdogs();
	local_irq_restore(flags);

	return NMI_HANDLED;
}
/*
 * NMI handler for pulling in CPUs when perf events are grabbing our NMI
 */
static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
	int ret;

	this_cpu_inc(uv_cpu_nmi.queries);
	if (!this_cpu_read(uv_cpu_nmi.pinging)) {
		local64_inc(&uv_nmi_ping_misses);
		return NMI_DONE;
	}

	this_cpu_inc(uv_cpu_nmi.pings);
	local64_inc(&uv_nmi_ping_count);
	ret = uv_handle_nmi(reason, regs);
	this_cpu_write(uv_cpu_nmi.pinging, 0);
	return ret;
}
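
/*
 * The ping handler above sits on the hot NMI_LOCAL chain, so when this
 * CPU was not pinged it must return NMI_DONE as cheaply as possible;
 * per the header comment, perf can drive this path millions of times
 * per second.
 */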
static void uv_register_nmi_notifier(void)
{
	if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
		pr_warn("UV: NMI handler failed to register\n");

	if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
		pr_warn("UV: PING NMI handler failed to register\n");
}
void uv_nmi_init(void)
{
	unsigned int value;

	/*
	 * Unmask NMI on all CPUs
	 */
	value = apic_read(APIC_LVT1) | APIC_DM_NMI;
	value &= ~APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);
}
/* Setup HUB NMI info */
static void __init uv_nmi_setup_common(bool hubbed)
{
	int size = sizeof(void *) * (1 << NODES_SHIFT);
	int cpu;

	uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
	nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
	BUG_ON(!uv_hub_nmi_list);
	size = sizeof(struct uv_hub_nmi_s);
	for_each_present_cpu(cpu) {
		int nid = cpu_to_node(cpu);

		if (uv_hub_nmi_list[nid] == NULL) {
			uv_hub_nmi_list[nid] = kzalloc_node(size,
							    GFP_KERNEL, nid);
			BUG_ON(!uv_hub_nmi_list[nid]);
			raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
			atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
			uv_hub_nmi_list[nid]->hub_present = hubbed;
			uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
		}
		uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
	}
	BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
}
/* Setup for UV Hub systems */
void __init uv_nmi_setup(void)
{
	uv_nmi_setup_mmrs();
	uv_nmi_setup_common(true);
	uv_register_nmi_notifier();
	pr_info("UV: Hub NMI enabled\n");
}
/* Setup for UV Hubless systems */
void __init uv_nmi_setup_hubless(void)
{
	uv_nmi_setup_common(false);
	pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
	nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
		pch_base, PCH_PCR_GPIO_1_BASE);

	if (uv_pch_init_enable)
		uv_init_hubless_pch_d0();

	uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
				STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
	uv_nmi_setup_hubless_intr();

	/* Ensure NMI enabled in Processor Interface Reg: */
	uv_reassert_nmi();
	uv_register_nmi_notifier();
	pr_info("UV: PCH NMI enabled\n");
}