/*
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 * Derived from "arch/i386/kernel/traps.c"
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"
void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;

#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
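/*
 * Return the address of the instruction that raised the program check.
 * The PSW address normally points past the faulting instruction, so the
 * instruction-length code kept in the upper halfword of int_code is
 * subtracted; when bit 0x200 of int_code is set, the address is taken
 * from the per-thread transaction diagnostic block (trap_tdb) instead.
 */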
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	unsigned long address;

	if (regs->int_code & 0x200)
		address = *(unsigned long *)(current->thread.trap_tdb + 24);
	else
		address = regs->psw.addr;
	return (void __user *)
		((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#else
	return (void __user *)
		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
#endif
}
/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}
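/*
 * Print a call trace starting at the given stack pointer, or at the
 * stack pointer of the given task (resp. of the current CPU) when none
 * is supplied, walking the stacks in the order described above.
 */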
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
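/*
 * Dump up to kstack_depth_to_print raw stack words starting at sp (or at
 * the task's resp. the current stack pointer), then print the call trace.
 */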
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
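/*
 * Extract a bit field from the PSW mask: (~bits + 1) & bits isolates the
 * lowest set bit of the field mask, so the division shifts the selected
 * field down to bit position 0.
 */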
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	printk("\n");
}
void show_regs(struct pt_regs *regs)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);
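/*
 * die() serializes oops output under die_lock with interrupts disabled,
 * prints the oops banner and register state, taints the kernel and,
 * depending on context and panic_on_oops, either panics or kills the
 * offending task.
 */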
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	spin_lock_irq(&die_lock);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	show_regs(regs);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	do_exit(SIGSEGV);
}
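/*
 * Rate-limited reporting of faults in user space, skipped unless
 * show_unhandled_signals is set (init is always reported) and the task
 * has no handler installed for the signal.
 */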
static inline void report_user_fault(struct pt_regs *regs, int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%X ", regs->int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
}
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;

	if (user_mode(regs)) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_trap_ip(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(regs, str);
		}
	}
}
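/*
 * PER (program event recording) event, e.g. a hardware single-step or
 * storage-alteration event: delivered to ptraced tasks as SIGTRAP with
 * si_code TRAP_HWBKPT.
 */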
void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr =
		(void __force __user *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}
static void default_trap_handler(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		report_user_fault(regs, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs)		\
{						\
	do_trap(regs, signr, sicode, str);	\
}
DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

#ifdef CONFIG_64BIT
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
	      "transaction constraint exception")
#endif
static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
	int si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si_code = FPE_FLTRES;
	}
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}
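/*
 * Illegal-operation handler: in user space the s390 breakpoint opcode is
 * turned into SIGTRAP for ptraced tasks and, with CONFIG_MATHEMU, the old
 * HFP opcodes are emulated; in kernel mode the trap is first offered to
 * kprobes via the DIE_BPT notifier.
 */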
static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_trap_ip(regs);

	if (user_mode(regs)) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL is
		 * generated for it.
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_trap_ip(regs);

	if (user_mode(regs)) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif
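/*
 * Data exception: store the current FPC first (on machines with IEEE
 * floating point), optionally emulate the old HFP instructions, and use
 * the data-exception code in the FPC to decide between SIGFPE and SIGILL.
 */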
static void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_trap_ip(regs);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (user_mode(regs)) {
		__u8 opcode[8];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}
static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (user_mode(regs))
		regs->psw.mask |= PSW_ASC_HOME;
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
void __kprobes kernel_stack_overflow(struct pt_regs *regs)
{
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	panic("Corrupt kernel stack, can't continue.");
}
/* init is done in lowcore.S and head.S */
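/*
 * trap_init() fills pgm_check_table, which is indexed by the s390
 * program interruption code; entries without a specific handler fall
 * back to default_trap_handler.
 */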
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x18] = &transaction_exception;
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}