1 // SPDX-License-Identifier: GPL-2.0
3 * arch/alpha/kernel/traps.c
5 * (C) Copyright 1994 Linus Torvalds
9 * This file initializes the trap entry points
12 #include <linux/jiffies.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sched/debug.h>
16 #include <linux/tty.h>
17 #include <linux/delay.h>
18 #include <linux/extable.h>
19 #include <linux/kallsyms.h>
20 #include <linux/ratelimit.h>
22 #include <asm/gentrap.h>
23 #include <linux/uaccess.h>
24 #include <asm/unaligned.h>
25 #include <asm/sysinfo.h>
26 #include <asm/hwrpb.h>
27 #include <asm/mmu_context.h>
28 #include <asm/special_insns.h>
32 /* Work-around for some SRMs which mishandle opDEC faults. */
/*
 * NOTE(review): this file is a partially elided dump -- original line
 * numbers are embedded at the start of each line and many lines are
 * missing -- so the fragments below are annotated in place rather than
 * restructured.
 *
 * Fragment: probe whether SRM delivers opDEC faults with the
 * architecturally-correct PC.  It installs a stub instruction-fault
 * handler via CALL_PAL wrent, then deliberately raises an opDEC fault
 * with "cvttq/svm $f31,$f31"; the asm's output [fix] records how far
 * the reported PC must be adjusted (stored in opDEC_fix, later applied
 * by do_entIF).  Clobbers listed cover the PAL scratch registers.
 */
39 __asm__ __volatile__ (
40 /* Load the address of... */
42 /* A stub instruction fault handler. Just add 4 to the
48 /* Install the instruction fault handler. */
50 " call_pal %[wrent]\n"
51 /* With that in place, the fault from the round-to-minf fp
52 insn will arrive either at the "lda 4" insn (bad) or one
53 past that (good). This places the correct fixup in %0. */
55 " cvttq/svm $f31,$f31\n"
57 : [fix] "=r" (opDEC_fix)
58 : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
59 : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
/* Only reached when a fixup is needed (guard elided in this excerpt). */
62 printk("opDEC fixup enabled.\n");
/*
 * dik_show_regs -- dump the integer register state for a kernel "die".
 * @regs:  trapped pt_regs; pc/ra/ps plus temporaries and arg registers.
 * @r9_15: callee-saved s0-s6 as saved by the switch-stack code, indexed
 *         from [9] (r9_15[9]..r9_15[15]); NULL-guard elided in this
 *         excerpt -- presumably only printed when non-NULL, TODO confirm.
 * The pre-trap kernel stack pointer is regs+1: the pt_regs frame sits
 * at the top of the stack, so the address just past it is "sp".
 */
66 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
68 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
69 regs->pc, regs->r26, regs->ps, print_tainted());
70 printk("pc is at %pSR\n", (void *)regs->pc);
71 printk("ra is at %pSR\n", (void *)regs->r26);
72 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
73 regs->r0, regs->r1, regs->r2);
74 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
75 regs->r3, regs->r4, regs->r5);
76 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
77 regs->r6, regs->r7, regs->r8);
/* Callee-saved registers come from r9_15[], not from pt_regs. */
80 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
81 r9_15[9], r9_15[10], r9_15[11]);
82 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
83 r9_15[12], r9_15[13], r9_15[14]);
84 printk("s6 = %016lx\n", r9_15[15]);
87 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
88 regs->r16, regs->r17, regs->r18);
89 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
90 regs->r19, regs->r20, regs->r21);
91 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
92 regs->r22, regs->r23, regs->r24);
93 printk("t11= %016lx pv = %016lx at = %016lx\n",
94 regs->r25, regs->r27, regs->r28);
95 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
/* Software names of Alpha integer registers $0..$31, indexed by
   register number (for decoding operands in fault reports). */
102 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
103 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
104 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
105 "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
/* Dump the instruction words around a faulting PC: eight words from
   pc-6*4 through pc+1*4, fetched with __get_user so unmapped text is
   tolerated (the loop bails on fault -- break elided in this excerpt);
   the word at pc itself is bracketed with <...>. */
109 dik_show_code(unsigned int *pc)
114 for (i = -6; i < 2; i++) {
116 if (__get_user(insn, (unsigned int __user *)pc + i))
118 printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
/* Print a best-effort backtrace: walk words up the kernel stack while
   sp stays inside the 0x1ff8-masked window (i.e. until the stack page
   is exhausted), reporting every word that lands in kernel text. */
124 dik_show_trace(unsigned long *sp, const char *loglvl)
127 printk("%sTrace:\n", loglvl);
128 while (0x1ff8 & (unsigned long) sp) {
129 extern char _stext[], _etext[];
130 unsigned long tmp = *sp;
/* Skip stack words that don't point into kernel text. */
132 if (!is_kernel_text(tmp))
134 printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
/* Depth limit reached (check elided in this excerpt). */
136 printk("%s ...", loglvl);
140 printk("%s\n", loglvl);
/* Maximum number of stack words show_stack() will hex-dump. */
143 static int kstack_depth_to_print = 24;
/*
 * show_stack -- hex-dump up to kstack_depth_to_print stack words
 * (stopping early at a THREAD_SIZE boundary), then print a backtrace.
 * With sp == NULL the current stack is used via the &sp trick below.
 */
145 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
147 unsigned long *stack;
151 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
152 * back trace for this cpu.
/* No sp given: start from this very stack frame. */
155 sp=(unsigned long*)&sp;
158 for(i=0; i < kstack_depth_to_print; i++) {
/* Crossed onto a fresh thread-stack boundary -- stop dumping. */
159 if (((long) stack & (THREAD_SIZE-1)) == 0)
164 printk("%s ", loglvl);
168 pr_cont("%016lx", *stack++);
171 dik_show_trace(sp, loglvl);
/*
 * die_if_kernel -- fatal-oops path for kernel-mode faults.
 * @str:   human-readable reason, @err: auxiliary error code,
 * @regs:  trapped frame, @r9_15: callee-saved regs for dik_show_regs
 *         (may be NULL -- see callers passing NULL below).
 * Dumps registers, taints the kernel, prints trace+code, then kills the
 * task.  TIF_DIE_IF_KERNEL guards against recursive dies (a fault
 * raised while already dying).  User-mode early-return is elided here.
 */
175 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
180 printk("CPU %d ", hard_smp_processor_id());
182 printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
183 dik_show_regs(regs, r9_15);
184 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
/* regs+1 == pre-trap sp; walk the stack above the trap frame. */
185 dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
186 dik_show_code((unsigned int *)regs->pc);
188 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
189 printk("die_if_kernel recursion detected.\n");
193 make_task_dead(SIGSEGV);
/* Without CONFIG_MATHEMU there is no FP software-completion emulator:
   point the hooks at a stub that returns 0 so do_entArith/do_entIF
   degrade gracefully.  With CONFIG_MATHEMU, the real functions are
   declared below and provided by the math-emu code. */
196 #ifndef CONFIG_MATHEMU
197 static long dummy_emul(void) { return 0; }
198 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
199 = (void *)dummy_emul;
200 EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
201 long (*alpha_fp_emul) (unsigned long pc)
202 = (void *)dummy_emul;
203 EXPORT_SYMBOL_GPL(alpha_fp_emul);
205 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
206 long alpha_fp_emul (unsigned long pc);
/*
 * Arithmetic fault entry (entArith).
 * @summary:    trap summary; its software-completion bit gates the
 *              emulation attempt (the test itself is elided here).
 * @write_mask: registers the trapping insn would have written.
 * @regs:       trapped frame.
 * If the CPU reports precise traps (amask), emulate the single insn at
 * pc-4; otherwise scan imprecisely.  Kernel-mode faults die; user-mode
 * faults get SIGFPE with the si_code derived from emulation.
 */
210 do_entArith(unsigned long summary, unsigned long write_mask,
211 struct pt_regs *regs)
213 long si_code = FPE_FLTINV;
216 /* Software-completion summary bit is set, so try to
217 emulate the instruction. If the processor supports
218 precise exceptions, we don't have to search. */
219 if (!amask(AMASK_PRECISE_TRAP))
220 si_code = alpha_fp_emul(regs->pc - 4);
222 si_code = alpha_fp_emul_imprecise(regs, write_mask);
226 die_if_kernel("Arithmetic fault", regs, 0, NULL);
228 send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
/*
 * Instruction fault entry (entIF): dispatches on @type --
 * 0 = breakpoint, 1 = bugcheck, 2 = gentrap, 3 = FEN fault, and
 * (label elided below) 4 = opDEC (illegal opcode).
 * Kernel-mode faults (ps IPL bits clear) decode BUG() metadata and die;
 * user-mode faults are translated into signals.
 */
232 do_entIF(unsigned long type, struct pt_regs *regs)
/* Kernel mode: ps interrupt-level field is zero for user, so a
   non-user ps here means the fault came from the kernel. */
236 if ((regs->ps & ~IPL_MAX) == 0) {
/* BUG() encodes file/line in the insn words following pc. */
238 const unsigned int *data
239 = (const unsigned int *) regs->pc;
240 printk("Kernel bug at %s:%d\n",
241 (const char *)(data[1] | (long)data[2] << 32),
244 #ifdef CONFIG_ALPHA_WTINT
246 /* If CALL_PAL WTINT is totally unsupported by the
247 PALcode, e.g. MILO, "emulate" it by overwriting
250 = (unsigned int *) regs->pc - 1;
251 if (*pinsn == PAL_wtint) {
/* Patch the trapping WTINT to "mov 0,$0" and retry. */
252 *pinsn = 0x47e01400; /* mov 0,$0 */
258 #endif /* ALPHA_WTINT */
259 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
264 case 0: /* breakpoint */
/* If ptrace had planted this bpt, restore the original insn
   and rewind pc so the insn can be re-executed. */
265 if (ptrace_cancel_bpt(current)) {
266 regs->pc -= 4; /* make pc point to former bpt */
269 send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc,
273 case 1: /* bugcheck */
274 send_sig_fault_trapno(SIGTRAP, TRAP_UNK,
275 (void __user *) regs->pc, 0, current);
278 case 2: /* gentrap */
/* r16 carries the gentrap code; mapping to signo/code elided. */
279 switch ((long) regs->r16) {
336 send_sig_fault_trapno(signo, code, (void __user *) regs->pc,
/* (case 4, opDEC -- label elided in this excerpt) */
341 if (implver() == IMPLVER_EV4) {
344 /* The some versions of SRM do not handle
345 the opDEC properly - they return the PC of the
346 opDEC fault, not the instruction after as the
347 Alpha architecture requires. Here we fix it up.
348 We do this by intentionally causing an opDEC
349 fault during the boot sequence and testing if
350 we get the correct PC. If not, we set a flag
351 to correct it every time through. */
352 regs->pc += opDEC_fix;
354 /* EV4 does not implement anything except normal
355 rounding. Everything else will come here as
356 an illegal instruction. Emulate them. */
357 si_code = alpha_fp_emul(regs->pc - 4);
361 send_sig_fault_trapno(SIGFPE, si_code,
362 (void __user *) regs->pc,
369 case 3: /* FEN fault */
370 /* Irritating users can call PAL_clrfen to disable the
371 FPU for the process. The kernel will then trap in
372 do_switch_stack and undo_switch_stack when we try
373 to save and restore the FP registers.
375 Given that GCC by default generates code that uses the
376 FP registers, PAL_clrfen is not useful except for DoS
377 attacks. So turn the bleeding FPU back on and be done
/* Set the PCB FEN bit (bit 0) to re-enable the FPU ... */
379 current_thread_info()->pcb.flags |= 1;
380 __reload_thread(&current_thread_info()->pcb);
/* Unknown instruction-fault subtype: treat as an illegal opcode. */
384 default: /* unexpected instruction-fault type */
388 send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, current);
391 /* There is an ifdef in the PALcode in MILO that enables a
392 "kernel debugging entry point" as an unprivileged call_pal.
394 We don't want to have anything to do with it, but unfortunately
395 several versions of MILO included in distributions have it enabled,
396 and if we don't put something on the entry point we'll oops. */
/* Stub handler for MILO's debug entry point: die in kernel mode,
   SIGILL the task in user mode. */
399 do_entDbg(struct pt_regs *regs)
401 die_if_kernel("Instruction fault", regs, 0, NULL);
403 force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc);
408 * entUna has a different register layout to be reasonably simple. It
409 * needs access to all the integer registers (the kernel doesn't use
410 * fp-regs), and it needs to have them in order for simpler access.
412 * Due to the non-standard register layout (and because we don't want
413 * to handle floating-point regs), user-mode unaligned accesses are
414 * handled separately by do_entUnaUser below.
416 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
417 * on a gp-register unaligned load/store, something is _very_ wrong
418 * in the kernel anyway..
/* struct allregs (name elided): all 32 integer registers in order,
   followed by the trap-frame words ps/pc/gp/a0-a2. */
421 unsigned long regs[32];
422 unsigned long ps, pc, gp, a0, a1, a2;
/* Per-mode unaligned-access statistics: [0] = kernel (do_entUna),
   [1] = user (do_entUnaUser). */
425 struct unaligned_stat {
426 unsigned long count, va, pc;
430 /* Macro for exception fixup code to access integer registers. */
/* Registers 16-18 (a0-a2) are redirected +19 words into the layout --
   presumably to the trap-frame copies saved after regs[], since the
   in-order slots hold stale values for them.  TODO confirm against the
   entUna assembly. */
431 #define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
/*
 * do_entUna -- fix up an unaligned access taken in KERNEL mode.
 * @va:     the misaligned virtual address, @opcode: insn opcode,
 * @reg:    destination/source register number, @regs: allregs frame.
 * Each case emulates the access with a pair of ldq_u/stq_u plus
 * extract/insert/mask insns (elided between the numbered lines), with
 * exception-table entries on each memory insn so a fault inside the
 * fixup lands in the "error" path.  On an unexpected fault the kernel
 * either forwards it via the exception table or dies with a dump.
 */
435 do_entUna(void * va, unsigned long opcode, unsigned long reg,
436 struct allregs *regs)
438 long error, tmp1, tmp2, tmp3, tmp4;
/* Trap reports pc AFTER the faulting insn; back up one word. */
439 unsigned long pc = regs->pc - 4;
440 unsigned long *_regs = regs->regs;
441 const struct exception_table_entry *fixup;
443 unaligned[0].count++;
444 unaligned[0].va = (unsigned long) va;
445 unaligned[0].pc = pc;
447 /* We don't want to use the generic get/put unaligned macros as
448 we want to trap exceptions. Only if we actually get an
449 exception will we decide whether we should have caught it. */
452 case 0x0c: /* ldwu */
453 __asm__ __volatile__(
454 "1: ldq_u %1,0(%3)\n"
455 "2: ldq_u %2,1(%3)\n"
461 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
465 una_reg(reg) = tmp1|tmp2;
/* (case label elided -- second-load offset 3 => 32-bit load,
   result sign-extended below: ldl) */
469 __asm__ __volatile__(
470 "1: ldq_u %1,0(%3)\n"
471 "2: ldq_u %2,3(%3)\n"
477 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
481 una_reg(reg) = (int)(tmp1|tmp2);
/* (case label elided -- offset 7 => 64-bit load: ldq) */
485 __asm__ __volatile__(
486 "1: ldq_u %1,0(%3)\n"
487 "2: ldq_u %2,7(%3)\n"
493 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
497 una_reg(reg) = tmp1|tmp2;
500 /* Note that the store sequences do not indicate that they change
501 memory because it _should_ be affecting nothing in this context.
502 (Otherwise we have other, much larger, problems.) */
/* (store cases; spans 1/3/7 correspond to 16/32/64-bit widths) */
504 __asm__ __volatile__(
505 "1: ldq_u %2,1(%5)\n"
506 "2: ldq_u %1,0(%5)\n"
513 "3: stq_u %2,1(%5)\n"
514 "4: stq_u %1,0(%5)\n"
520 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
521 "=&r"(tmp3), "=&r"(tmp4)
522 : "r"(va), "r"(una_reg(reg)), "0"(0));
528 __asm__ __volatile__(
529 "1: ldq_u %2,3(%5)\n"
530 "2: ldq_u %1,0(%5)\n"
537 "3: stq_u %2,3(%5)\n"
538 "4: stq_u %1,0(%5)\n"
544 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
545 "=&r"(tmp3), "=&r"(tmp4)
546 : "r"(va), "r"(una_reg(reg)), "0"(0));
552 __asm__ __volatile__(
553 "1: ldq_u %2,7(%5)\n"
554 "2: ldq_u %1,0(%5)\n"
561 "3: stq_u %2,7(%5)\n"
562 "4: stq_u %1,0(%5)\n"
568 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
569 "=&r"(tmp3), "=&r"(tmp4)
570 : "r"(va), "r"(una_reg(reg)), "0"(0));
/* Opcode we don't emulate: this is a kernel bug, die loudly. */
576 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
577 pc, va, opcode, reg);
578 make_task_dead(SIGSEGV);
581 /* Ok, we caught the exception, but we don't want it. Is there
582 someone to pass it along to? */
583 if ((fixup = search_exception_tables(pc)) != 0) {
585 newpc = fixup_exception(una_reg, fixup, pc);
587 printk("Forwarding unaligned exception at %lx (%lx)\n",
595 * Yikes! No one to forward the exception to.
596 * Since the registers are in a weird format, dump them ourselves.
599 printk("%s(%d): unhandled unaligned exception\n",
600 current->comm, task_pid_nr(current));
602 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
603 pc, una_reg(26), regs->ps);
604 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
605 una_reg(0), una_reg(1), una_reg(2));
606 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
607 una_reg(3), una_reg(4), una_reg(5));
608 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
609 una_reg(6), una_reg(7), una_reg(8));
610 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
611 una_reg(9), una_reg(10), una_reg(11));
612 printk("r12= %016lx r13= %016lx r14= %016lx\n",
613 una_reg(12), una_reg(13), una_reg(14));
614 printk("r15= %016lx\n", una_reg(15));
615 printk("r16= %016lx r17= %016lx r18= %016lx\n",
616 una_reg(16), una_reg(17), una_reg(18));
617 printk("r19= %016lx r20= %016lx r21= %016lx\n",
618 una_reg(19), una_reg(20), una_reg(21));
619 printk("r22= %016lx r23= %016lx r24= %016lx\n",
620 una_reg(22), una_reg(23), una_reg(24));
621 printk("r25= %016lx r27= %016lx r28= %016lx\n",
622 una_reg(25), una_reg(27), una_reg(28));
623 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
625 dik_show_code((unsigned int *)pc);
626 dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
/* Same recursion guard as die_if_kernel. */
628 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
629 printk("die_if_kernel recursion detected.\n");
633 make_task_dead(SIGSEGV);
637 * Convert an s-floating point value in memory format to the
638 * corresponding value in register format. The exponent
639 * needs to be remapped to preserve non-finite values
640 * (infinities, not-a-numbers, denormals).
642 static inline unsigned long
643 s_mem_to_reg (unsigned long s_mem)
/* Decompose the IEEE single: 23-bit fraction, sign, and the 8-bit
   exponent split into its MSB and low 7 bits. */
645 unsigned long frac = (s_mem >> 0) & 0x7fffff;
646 unsigned long sign = (s_mem >> 31) & 0x1;
647 unsigned long exp_msb = (s_mem >> 30) & 0x1;
648 unsigned long exp_low = (s_mem >> 23) & 0x7f;
/* Widen the exponent to 11 bits by replicating the MSB's meaning. */
651 exp = (exp_msb << 10) | exp_low; /* common case */
/* All-ones low exponent: Inf/NaN special-casing (body elided). */
653 if (exp_low == 0x7f) {
/* Zero low exponent: zero/denormal special-casing (body elided). */
657 if (exp_low == 0x00) {
/* Reassemble in T-floating register layout: sign in bit 63,
   11-bit exponent at 52, fraction left-justified by 29. */
663 return (sign << 63) | (exp << 52) | (frac << 29);
667 * Convert an s-floating point value in register format to the
668 * corresponding value in memory format.
670 static inline unsigned long
671 s_reg_to_mem (unsigned long s_reg)
/* Inverse of s_mem_to_reg: sign+exponent-MSB from the top two bits
   into bits 31:30, then exponent-low+fraction shifted back down. */
673 return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
677 * Handle user-level unaligned fault. Handling user-level unaligned
678 * faults is *extremely* slow and produces nasty messages. A user
679 * program *should* fix unaligned faults ASAP.
681 * Notice that we have (almost) the regular kernel stack layout here,
682 * so finding the appropriate registers is a little more difficult
683 * than in the kernel case.
685 * Finally, we handle regular integer load/stores only. In
686 * particular, load-linked/store-conditionally and floating point
687 * load/stores are not supported. The former make no sense with
688 * unaligned faults (they are guaranteed to fail) and I don't think
689 * the latter will occur in any decent program.
691 * Sigh. We *do* have to handle some FP operations, because GCC will
692 * uses them as temporary storage for integer memory to memory copies.
693 * However, we need to deal with stt/ldt and sts/lds only.
/* Bitmasks indexed by opcode (tested as 1L << opcode): which opcodes
   are integer loads/stores, and which opcodes are writes. */
696 #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \
697 | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \
698 | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \
699 | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
701 #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \
702 | 1L << 0x2c | 1L << 0x2d /* stl stq */ \
703 | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
/* Hand-rolled offsetof(struct pt_regs, x). */
705 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
/* Byte offset from the pt_regs base to each integer register's save
   slot; negative entries reach the switch-stack area saved in front
   of pt_regs (r9-r15). */
707 static int unauser_reg_offsets[32] = {
708 R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
709 /* r9 ... r15 are stored in front of regs. */
710 -56, -48, -40, -32, -24, -16, -8,
711 R(r16), R(r17), R(r18),
712 R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
713 R(r27), R(r28), R(gp),
/*
 * do_entUnaUser -- fix up an unaligned access taken in USER mode.
 * @va: misaligned user address, @opcode: insn opcode, @reg: register
 * number, @regs: user trap frame.
 * Honors the per-thread UAC bits (noprint / sigbus / nofix), keeps
 * statistics in unaligned[1], emulates integer accesses directly into
 * the pt_regs save slot and FP accesses through fake_reg plus
 * alpha_read/write_fp_reg, and converts unexpected faults into
 * SIGSEGV/SIGBUS.  As in do_entUna, the emulation asm bodies and some
 * case labels are elided between the numbered lines.
 */
720 do_entUnaUser(void __user * va, unsigned long opcode,
721 unsigned long reg, struct pt_regs *regs)
/* Rate-limit the nag message to 5 per 5*HZ. */
723 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
725 unsigned long tmp1, tmp2, tmp3, tmp4;
/* Default target is a scratch word; integer ops retarget it below. */
726 unsigned long fake_reg, *reg_addr = &fake_reg;
730 /* Check the UAC bits to decide what the user wants us to do
731 with the unaligned access. */
733 if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
734 if (__ratelimit(&ratelimit)) {
735 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
736 current->comm, task_pid_nr(current),
737 regs->pc - 4, va, opcode, reg);
/* User asked for SIGBUS instead of a fixup. */
740 if ((current_thread_info()->status & TS_UAC_SIGBUS))
742 /* Not sure why you'd want to use this, but... */
743 if ((current_thread_info()->status & TS_UAC_NOFIX))
746 /* Don't bother reading ds in the access check since we already
747 know that this came from the user. Also rely on the fact that
748 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
749 if ((unsigned long)va >= TASK_SIZE)
752 ++unaligned[1].count;
753 unaligned[1].va = (unsigned long)va;
754 unaligned[1].pc = regs->pc - 4;
756 if ((1L << opcode) & OP_INT_MASK) {
757 /* it's an integer load/store */
/* Point directly at the register's save slot in/around pt_regs. */
759 reg_addr = (unsigned long *)
760 ((char *)regs + unauser_reg_offsets[reg]);
761 } else if (reg == 30) {
762 /* usp in PAL regs */
765 /* zero "register" */
770 /* We don't want to use the generic get/put unaligned macros as
771 we want to trap exceptions. Only if we actually get an
772 exception will we decide whether we should have caught it. */
775 case 0x0c: /* ldwu */
776 __asm__ __volatile__(
777 "1: ldq_u %1,0(%3)\n"
778 "2: ldq_u %2,1(%3)\n"
784 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
788 *reg_addr = tmp1|tmp2;
/* (label elided -- lds: 32-bit FP load, converted to register fmt) */
792 __asm__ __volatile__(
793 "1: ldq_u %1,0(%3)\n"
794 "2: ldq_u %2,3(%3)\n"
800 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
804 alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
/* (label elided -- ldt: 64-bit FP load) */
808 __asm__ __volatile__(
809 "1: ldq_u %1,0(%3)\n"
810 "2: ldq_u %2,7(%3)\n"
816 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
820 alpha_write_fp_reg(reg, tmp1|tmp2);
/* (label elided -- ldl: 32-bit integer load, sign-extended) */
824 __asm__ __volatile__(
825 "1: ldq_u %1,0(%3)\n"
826 "2: ldq_u %2,3(%3)\n"
832 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
836 *reg_addr = (int)(tmp1|tmp2);
/* (label elided -- ldq: 64-bit integer load) */
840 __asm__ __volatile__(
841 "1: ldq_u %1,0(%3)\n"
842 "2: ldq_u %2,7(%3)\n"
848 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
852 *reg_addr = tmp1|tmp2;
855 /* Note that the store sequences do not indicate that they change
856 memory because it _should_ be affecting nothing in this context.
857 (Otherwise we have other, much larger, problems.) */
/* (store cases; spans 1/3/7 correspond to 16/32/64-bit widths) */
859 __asm__ __volatile__(
860 "1: ldq_u %2,1(%5)\n"
861 "2: ldq_u %1,0(%5)\n"
868 "3: stq_u %2,1(%5)\n"
869 "4: stq_u %1,0(%5)\n"
875 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
876 "=&r"(tmp3), "=&r"(tmp4)
877 : "r"(va), "r"(*reg_addr), "0"(0));
/* (sts: fetch FP reg, convert to memory format, fall into store) */
883 fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
887 __asm__ __volatile__(
888 "1: ldq_u %2,3(%5)\n"
889 "2: ldq_u %1,0(%5)\n"
896 "3: stq_u %2,3(%5)\n"
897 "4: stq_u %1,0(%5)\n"
903 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
904 "=&r"(tmp3), "=&r"(tmp4)
905 : "r"(va), "r"(*reg_addr), "0"(0));
/* (stt: 64-bit FP store goes out unconverted) */
911 fake_reg = alpha_read_fp_reg(reg);
915 __asm__ __volatile__(
916 "1: ldq_u %2,7(%5)\n"
917 "2: ldq_u %1,0(%5)\n"
924 "3: stq_u %2,7(%5)\n"
925 "4: stq_u %1,0(%5)\n"
931 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
932 "=&r"(tmp3), "=&r"(tmp4)
933 : "r"(va), "r"(*reg_addr), "0"(0));
939 /* What instruction were you trying to use, exactly? */
943 /* Only integer loads should get here; everyone else returns early. */
/* Fault path: deliver SIGSEGV with an si_code matching mm/fault.c. */
949 regs->pc -= 4; /* make pc point to faulting insn */
951 /* We need to replicate some of the logic in mm/fault.c,
952 since we don't have access to the fault code in the
953 exception handling return path. */
954 if ((unsigned long)va >= TASK_SIZE)
955 si_code = SEGV_ACCERR;
957 struct mm_struct *mm = current->mm;
/* Address maps => protection error, else not mapped at all.
   (mmap_read_lock elided in this excerpt.) */
959 if (find_vma(mm, (unsigned long)va))
960 si_code = SEGV_ACCERR;
962 si_code = SEGV_MAPERR;
963 mmap_read_unlock(mm);
965 send_sig_fault(SIGSEGV, si_code, va, current);
/* UAC_SIGBUS path: report alignment error instead of fixing up. */
970 send_sig_fault(SIGBUS, BUS_ADRALN, va, current);
977 /* Tell PAL-code what global pointer we want in the kernel. */
978 register unsigned long gptr __asm__("$29");
981 /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
982 a bug in the handling of the opDEC fault. Fix it up if so. */
983 if (implver() == IMPLVER_EV4)