// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>

#include "access-helper.h"
extern asmlinkage void handle_ade(void);
extern asmlinkage void handle_ale(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_fpu(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_lbt(void);
extern asmlinkage void handle_lsx(void);
extern asmlinkage void handle_lasx(void);
extern asmlinkage void handle_reserved(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_vint(void);
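/*
 * Walk the call chain of @task with the arch unwinder and print one
 * symbolised return address per line at @loglvl.
 */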
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
                           const char *loglvl, bool user)
{
        unsigned long addr;
        struct unwind_state state;
        struct pt_regs *pregs = (struct pt_regs *)regs;

        if (!task)
                task = current;

        if (user_mode(regs))
                state.type = UNWINDER_GUESS;

        printk("%sCall Trace:", loglvl);
        for (unwind_start(&state, task, pregs);
             !unwind_done(&state); unwind_next_frame(&state)) {
                addr = unwind_get_return_address(&state);
                print_ip_sym(loglvl, addr);
        }
        printk("%s\n", loglvl);
}
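/*
 * Dump the raw stack words from $sp (regs->regs[3]) up to the next page
 * boundary, then print the backtrace for the same context.
 */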
static void show_stacktrace(struct task_struct *task,
        const struct pt_regs *regs, const char *loglvl, bool user)
{
        int i = 0;
        const int field = 2 * sizeof(unsigned long);
        unsigned long stackdata;
        unsigned long *sp = (unsigned long *)regs->regs[3];

        printk("%sStack :", loglvl);
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0)) {
                        pr_cont("\n");
                        printk("%s        ", loglvl);
                }
                if (__get_addr(&stackdata, sp++, user)) {
                        pr_cont(" (Bad stack address)");
                        break;
                }
                pr_cont(" %0*lx", field, stackdata);
                i++;
        }
        pr_cont("\n");
        show_backtrace(task, regs, loglvl, user);
}
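/*
 * Build a minimal pt_regs describing the context to dump: from @sp if
 * given, from the current frame via prepare_frametrace(), or from the
 * switched-out task's saved ra/sp/fp (thread.reg01/reg03/reg22).
 */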
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
        struct pt_regs regs;

        regs.csr_crmd = 0;
        if (sp) {
                regs.csr_era = 0;
                regs.regs[3] = (unsigned long)sp;
        } else if (!task || task == current) {
                prepare_frametrace(&regs);
        } else {
                regs.csr_era = task->thread.reg01;
                regs.regs[3] = task->thread.reg03;
                regs.regs[22] = task->thread.reg22;
        }

        show_stacktrace(task, &regs, loglvl, false);
}
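/*
 * Dump the instruction words around the faulting era, with the faulting
 * instruction itself marked by angle brackets.
 */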
static void show_code(unsigned int *pc, bool user)
{
        long i;
        unsigned int insn;

        printk("Code:");
        for (i = -3; i < 6; i++) {
                if (__get_inst(&insn, pc + i, user)) {
                        pr_cont(" (Bad address in era)\n");
                        break;
                }
                pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
        }
        pr_cont("\n");
}
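/*
 * Print the GPRs four per row, the key CSRs (era, ra, crmd, prmd, euen,
 * ecfg, estat), the decoded ExcCode/SubCode, BadVA for address-related
 * exceptions, and the PRID of the CPU.
 */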
static void __show_regs(const struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int excsubcode;
        unsigned int exccode;
        int i;

        show_regs_print_info(KERN_DEFAULT);

        /*
         * Saved main processor registers
         */
        for (i = 0; i < 32; ) {
                if ((i % 4) == 0)
                        printk("$%2d   :", i);
                pr_cont(" %0*lx", field, regs->regs[i]);

                i++;
                if ((i % 4) == 0)
                        pr_cont("\n");
        }

        /*
         * Saved csr registers
         */
        printk("era   : %0*lx %pS\n", field, regs->csr_era,
               (void *) regs->csr_era);
        printk("ra    : %0*lx %pS\n", field, regs->regs[1],
               (void *) regs->regs[1]);

        printk("CSR crmd: %08lx ", regs->csr_crmd);
        printk("CSR prmd: %08lx ", regs->csr_prmd);
        printk("CSR euen: %08lx ", regs->csr_euen);
        printk("CSR ecfg: %08lx ", regs->csr_ecfg);
        printk("CSR estat: %08lx ", regs->csr_estat);

        pr_cont("\n");

        exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
        excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT;
        printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode);

        if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
                printk("BadVA : %0*lx\n", field, regs->csr_badvaddr);

        printk("PrId  : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
               cpu_family_string());
}
void show_regs(struct pt_regs *regs)
{
        __show_regs((struct pt_regs *)regs);
        dump_stack();
}
void show_registers(struct pt_regs *regs)
{
        __show_regs(regs);
        print_modules();
        printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
               current->comm, current->pid, current_thread_info(), current);

        show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
        show_code((void *)regs->csr_era, user_mode(regs));
        pr_cont("\n");
}
static DEFINE_RAW_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
        static int die_counter;
        int sig = SIGSEGV;

        oops_enter();

        if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
                       SIGSEGV) == NOTIFY_STOP)
                sig = 0;

        console_verbose();
        raw_spin_lock_irq(&die_lock);
        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        raw_spin_unlock_irq(&die_lock);
        oops_exit();

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        make_task_dead(sig);
}
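/*
 * Program CSR.ECFG.VS: the spacing between exception vector entries is
 * 2^VS instructions, so a spacing of @size bytes needs VS = ilog2(size / 4).
 */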
static inline void setup_vint_size(unsigned int size)
{
        unsigned int vs = ilog2(size / 4);

        if (vs == 0 || vs > 7)
                panic("vint_size %d not supported yet", vs);

        csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}
/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits. This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
                    struct task_struct *tsk)
{
        int si_code = FPE_FLTUNK;

        if (fcsr & FPU_CSR_INV_X)
                si_code = FPE_FLTINV;
        else if (fcsr & FPU_CSR_DIV_X)
                si_code = FPE_FLTDIV;
        else if (fcsr & FPU_CSR_OVF_X)
                si_code = FPE_FLTOVF;
        else if (fcsr & FPU_CSR_UDF_X)
                si_code = FPE_FLTUND;
        else if (fcsr & FPU_CSR_INE_X)
                si_code = FPE_FLTRES;

        force_sig_fault(SIGFPE, si_code, fault_addr);
}
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
        int si_code;

        switch (sig) {
        case 0:
                return 0;
        case SIGFPE:
                force_fcsr_sig(fcsr, fault_addr, current);
                return 1;
        case SIGBUS:
                force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
                return 1;
        case SIGSEGV:
                mmap_read_lock(current->mm);
                if (vma_lookup(current->mm, (unsigned long)fault_addr))
                        si_code = SEGV_ACCERR;
                else
                        si_code = SEGV_MAPERR;
                mmap_read_unlock(current->mm);
                force_sig_fault(SIGSEGV, si_code, fault_addr);
                return 1;
        default:
                force_sig(sig);
                return 1;
        }
}
/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
        int sig;
        void __user *fault_addr;
        irqentry_state_t state = irqentry_enter(regs);

        if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
                       SIGFPE) == NOTIFY_STOP)
                goto out;

        /* Clear FCSR.Cause before enabling interrupts */
        write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
        local_irq_enable();

        die_if_kernel("FP exception in kernel code", regs);

        sig = SIGFPE;
        fault_addr = (void __user *) regs->csr_era;

        /* Send a signal if required. */
        process_fpemu_return(sig, fault_addr, fcsr);

out:
        local_irq_disable();
        irqentry_exit(regs, state);
}
asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        die_if_kernel("Kernel ade access", regs);
        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

        irqentry_exit(regs, state);
}
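/*
 * Unaligned access exception: fatal in kernel mode, otherwise deliver
 * SIGBUS/BUS_ADRALN with the offending virtual address.
 */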
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        die_if_kernel("Kernel ale access", regs);
        force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);

        irqentry_exit(regs, state);
}
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
        return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static void bug_handler(struct pt_regs *regs)
{
        switch (report_bug(regs->csr_era, regs)) {
        case BUG_TRAP_TYPE_BUG:
        case BUG_TRAP_TYPE_NONE:
                die_if_kernel("Oops - BUG", regs);
                force_sig(SIGTRAP);
                break;

        case BUG_TRAP_TYPE_WARN:
                /* Skip the BUG instruction and continue */
                regs->csr_era += LOONGARCH_INSN_SIZE;
                break;
        }
}
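/*
 * Breakpoint exception: the low 15 bits of the break instruction select
 * the handler - kprobe/uprobe breakpoints, BUG(), integer divide-by-zero
 * and overflow checks, or a plain debugger breakpoint (SIGTRAP).
 */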
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
        bool user = user_mode(regs);
        unsigned int opcode, bcode;
        unsigned long era = exception_era(regs);
        irqentry_state_t state = irqentry_enter(regs);

        local_irq_enable();
        current->thread.trap_nr = read_csr_excode();
        if (__get_inst(&opcode, (u32 *)era, user))
                goto out_sigsegv;

        bcode = (opcode & 0x7fff);

        /*
         * notify the kprobe handlers, if instruction is likely to
         * pertain to them.
         */
        switch (bcode) {
        case BRK_KPROBE_BP:
                if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                break;
        case BRK_KPROBE_SSTEPBP:
                if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                break;
        case BRK_UPROBE_BP:
                if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                break;
        case BRK_UPROBE_XOLBP:
                if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                break;
        default:
                if (notify_die(DIE_TRAP, "Break", regs, bcode,
                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
                        goto out;
                break;
        }

        switch (bcode) {
        case BRK_BUG:
                bug_handler(regs);
                break;
        case BRK_DIVZERO:
                die_if_kernel("Break instruction in kernel code", regs);
                force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
                break;
        case BRK_OVERFLOW:
                die_if_kernel("Break instruction in kernel code", regs);
                force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
                break;
        default:
                die_if_kernel("Break instruction in kernel code", regs);
                force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
                break;
        }

out:
        local_irq_disable();
        irqentry_exit(regs, state);
        return;

out_sigsegv:
        force_sig(SIGSEGV);
        goto out;
}
asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
        pr_warn("Hardware watch point handler not implemented!\n");
}
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
        int status = SIGILL;
        unsigned int opcode = 0;
        unsigned int __user *era = (unsigned int __user *)exception_era(regs);
        irqentry_state_t state = irqentry_enter(regs);

        local_irq_enable();
        current->thread.trap_nr = read_csr_excode();

        if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
                       SIGILL) == NOTIFY_STOP)
                goto out;

        die_if_kernel("Reserved instruction in kernel code", regs);

        if (unlikely(get_user(opcode, era) < 0)) {
                status = SIGSEGV;
                current->thread.error_code = 1;
        }

        force_sig(status);

out:
        local_irq_disable();
        irqentry_exit(regs, state);
}
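/*
 * Make the FPU usable by the current task: initialise a fresh FP context
 * on first use, otherwise reclaim ownership of the saved context.
 */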
static void init_restore_fp(void)
{
        if (!used_math()) {
                /* First time FP context user. */
                init_fpu();
        } else {
                /* This task has formerly used the FP context */
                if (!is_fpu_owner())
                        own_fpu_inatomic(1);
        }

        BUG_ON(!is_fp_enabled());
}
asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        local_irq_enable();
        die_if_kernel("do_fpu invoked from kernel context!", regs);

        preempt_disable();
        init_restore_fp();
        preempt_enable();

        local_irq_disable();
        irqentry_exit(regs, state);
}
asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        local_irq_enable();
        force_sig(SIGILL);
        local_irq_disable();

        irqentry_exit(regs, state);
}
asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        local_irq_enable();
        force_sig(SIGILL);
        local_irq_disable();

        irqentry_exit(regs, state);
}
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        local_irq_enable();
        force_sig(SIGILL);
        local_irq_disable();

        irqentry_exit(regs, state);
}
asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
        irqentry_state_t state = irqentry_enter(regs);

        local_irq_enable();
        /*
         * Game over - no way to handle this if it ever occurs. Most probably
         * caused by a fatal error after another hardware/software error.
         */
        pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
                read_csr_excode(), current->pid, current->comm);
        die_if_kernel("do_reserved exception", regs);
        force_sig(SIGUNUSED);

        local_irq_disable();

        irqentry_exit(regs, state);
}
asmlinkage void cache_parity_error(void)
{
        /* For the moment, report the problem and hang. */
        pr_err("Cache error exception:\n");
        pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
        pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
        panic("Can't handle the cache error!");
}
asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
        struct pt_regs *old_regs;

        irq_enter_rcu();
        old_regs = set_irq_regs(regs);
        handle_arch_irq(regs);
        set_irq_regs(old_regs);
        irq_exit_rcu();
}
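/*
 * Vectored interrupt entry: if we already run on this CPU's IRQ stack,
 * call the handler directly; otherwise publish the task sp for the
 * unwinder and switch $sp to the IRQ stack around the call.
 */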
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
        int cpu;
        register unsigned long stack;
        irqentry_state_t state = irqentry_enter(regs);

        cpu = smp_processor_id();

        if (on_irq_stack(cpu, sp))
                handle_loongarch_irq(regs);
        else {
                stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

                /* Save task's sp on IRQ stack for unwinding */
                *(unsigned long *)stack = sp;

                __asm__ __volatile__(
                "move   $s0, $sp                \n" /* Preserve sp */
                "move   $sp, %[stk]             \n" /* Switch stack */
                "move   $a0, %[regs]            \n"
                "bl     handle_loongarch_irq    \n"
                "move   $sp, $s0                \n" /* Restore sp */
                : /* No outputs */
                : [stk] "r" (stack), [regs] "r" (regs)
                : "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
                  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
                  "memory");
        }

        irqentry_exit(regs, state);
}
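/*
 * All first-level handlers are copied into exception_handlers: 128 slots
 * of VECSIZE bytes each, with slot 80 used as the TLB refill entry.
 */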
unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

static void configure_exception_vector(void)
{
        eentry    = (unsigned long)exception_handlers;
        tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;

        csr_write64(eentry, LOONGARCH_CSR_EENTRY);
        csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
        csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}
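/*
 * Per-CPU trap setup: program the vector spacing and entry point CSRs,
 * seed the ASID cache, adopt init_mm, and on the boot CPU point every
 * vector at handle_reserved until trap_init() installs the real handlers.
 */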
void per_cpu_trap_init(int cpu)
{
        unsigned int i;

        setup_vint_size(VECSIZE);

        configure_exception_vector();

        if (!cpu_data[cpu].asid_cache)
                cpu_data[cpu].asid_cache = asid_first_version(cpu);

        mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);

        /* Initialise exception handlers */
        if (cpu == 0)
                for (i = 0; i < 64; i++)
                        set_handler(i * VECSIZE, handle_reserved, VECSIZE);

        tlb_init(cpu);
        cpu_cache_init();
}
/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
        memcpy((void *)(eentry + offset), addr, size);
        local_flush_icache_range(eentry + offset, eentry + offset + size);
}
static const char panic_null_cerr[] =
        "Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
        unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

        if (!addr)
                panic(panic_null_cerr);

        memcpy((void *)(uncached_eentry + offset), addr, size);
}
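/*
 * Install the real first-level handlers: handle_vint for every interrupt
 * vector and a dedicated handler for each architectural exception code.
 */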
void __init trap_init(void)
{
        long i;

        /* Set interrupt vector handler */
        for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++)
                set_handler(i * VECSIZE, handle_vint, VECSIZE);

        set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
        set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
        set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
        set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
        set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
        set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
        set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
        set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
        set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
        set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
        set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
        set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

        cache_error_setup();

        local_flush_icache_range(eentry, eentry + 0x400);
}