arch/riscv/kernel/traps.c — RISC-V trap and exception handling
(source: platform/kernel/linux-starfive.git; the previously attached
"powerpc/mm" commit title belonged to a different patch and did not
describe this file)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2012 Regents of the University of California
4  */
5
6 #include <linux/cpu.h>
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/sched.h>
10 #include <linux/sched/debug.h>
11 #include <linux/sched/signal.h>
12 #include <linux/signal.h>
13 #include <linux/kdebug.h>
14 #include <linux/uaccess.h>
15 #include <linux/kprobes.h>
16 #include <linux/mm.h>
17 #include <linux/module.h>
18 #include <linux/irq.h>
19 #include <linux/kexec.h>
20 #include <linux/entry-common.h>
21
22 #include <asm/asm-prototypes.h>
23 #include <asm/bug.h>
24 #include <asm/cfi.h>
25 #include <asm/csr.h>
26 #include <asm/processor.h>
27 #include <asm/ptrace.h>
28 #include <asm/syscall.h>
29 #include <asm/thread_info.h>
30 #include <asm/vector.h>
31 #include <asm/irq_stack.h>
32
33 int show_unhandled_signals = 1;
34
35 static DEFINE_SPINLOCK(die_lock);
36
37 static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
38 {
39         char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
40         const u16 *insns = (u16 *)instruction_pointer(regs);
41         long bad;
42         u16 val;
43         int i;
44
45         for (i = -10; i < 2; i++) {
46                 bad = get_kernel_nofault(val, &insns[i]);
47                 if (!bad) {
48                         p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
49                 } else {
50                         printk("%sCode: Unable to access instruction at 0x%px.\n",
51                                loglvl, &insns[i]);
52                         return;
53                 }
54         }
55         printk("%sCode: %s\n", loglvl, str);
56 }
57
/*
 * Terminal oops path: print diagnostics, notify listeners, then either
 * panic or kill the current task.  'regs' may be NULL when called from a
 * context without saved registers.
 */
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	/* Serialize concurrent oopses so their output is not interleaved. */
	spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_kernel_instr(KERN_EMERG, regs);
	}

	/* Guard against regs == NULL; -1 stands in for "cause unknown". */
	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	/* Hand off to the crash kernel before tearing the system down. */
	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	/* A NOTIFY_STOP listener (e.g. a debugger) may absorb the oops. */
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}
96
97 void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
98 {
99         struct task_struct *tsk = current;
100
101         if (show_unhandled_signals && unhandled_signal(tsk, signo)
102             && printk_ratelimit()) {
103                 pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
104                         tsk->comm, task_pid_nr(tsk), signo, code, addr);
105                 print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
106                 pr_cont("\n");
107                 __show_regs(regs);
108         }
109
110         force_sig_fault(signo, code, (void __user *)addr);
111 }
112
113 static void do_trap_error(struct pt_regs *regs, int signo, int code,
114         unsigned long addr, const char *str)
115 {
116         current->thread.bad_cause = regs->cause;
117
118         if (user_mode(regs)) {
119                 do_trap(regs, signo, code, addr);
120         } else {
121                 if (!fixup_exception(regs))
122                         die(regs, str);
123         }
124 }
125
/*
 * XIP kernels with alternatives keep trap handlers in a RAM section so
 * they remain patchable; otherwise plain noinstr suffices.
 */
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif
/*
 * Template for simple trap handlers: report signo/code at the faulting
 * epc via do_trap_error().  NOTE: the macro prepends "Oops - " to 'str',
 * so callers must pass the bare description.
 */
#define DO_ERROR_INFO(name, signo, code, str)                                   \
asmlinkage __visible __trap_section void name(struct pt_regs *regs)             \
{                                                                               \
        if (user_mode(regs)) {                                                  \
                irqentry_enter_from_user_mode(regs);                            \
                do_trap_error(regs, signo, code, regs->epc, "Oops - " str);     \
                irqentry_exit_to_user_mode(regs);                               \
        } else {                                                                \
                irqentry_state_t state = irqentry_nmi_enter(regs);              \
                do_trap_error(regs, signo, code, regs->epc, "Oops - " str);     \
                irqentry_nmi_exit(regs, state);                                 \
        }                                                                       \
}
144
/* Simple fault reporters generated from the DO_ERROR_INFO template. */
DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
151
/*
 * Illegal-instruction trap.  In user mode this may be the first use of a
 * vector instruction, which is handled lazily; only if that handler
 * declines do we report SIGILL.
 */
asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		/*
		 * NOTE(review): interrupts are enabled across the vector
		 * first-use handler — presumably it can do work that must
		 * not run with IRQs off; confirm against its definition.
		 */
		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);

		local_irq_disable();

		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		irqentry_exit_to_user_mode(regs);
	} else {
		/* Kernel-mode illegal instruction: enter as NMI-like context. */
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}
179
180 DO_ERROR_INFO(do_trap_load_fault,
181         SIGSEGV, SEGV_ACCERR, "load access fault");
182 #ifndef CONFIG_RISCV_M_MODE
183 DO_ERROR_INFO(do_trap_load_misaligned,
184         SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
185 DO_ERROR_INFO(do_trap_store_misaligned,
186         SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
187 #else
188 int handle_misaligned_load(struct pt_regs *regs);
189 int handle_misaligned_store(struct pt_regs *regs);
190
/*
 * M-mode misaligned-load trap: try software emulation first via
 * handle_misaligned_load(); a nonzero return means emulation failed and
 * the access is reported as SIGBUS.
 */
asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      "Oops - load address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		/* Kernel-mode fault: enter as an NMI-like context. */
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
			      "Oops - load address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}
211
212 asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
213 {
214         if (user_mode(regs)) {
215                 irqentry_enter_from_user_mode(regs);
216
217                 if (handle_misaligned_store(regs))
218                         do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
219                                 "Oops - store (or AMO) address misaligned");
220
221                 irqentry_exit_to_user_mode(regs);
222         } else {
223                 irqentry_state_t state = irqentry_nmi_enter(regs);
224
225                 if (handle_misaligned_store(regs))
226                         do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
227                                 "Oops - store (or AMO) address misaligned");
228
229                 irqentry_nmi_exit(regs, state);
230         }
231 }
232 #endif
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
/* ecall traps from S-/M-mode reaching here are unexpected: report SIGILL. */
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");
239
240 static inline unsigned long get_break_insn_length(unsigned long pc)
241 {
242         bug_insn_t insn;
243
244         if (get_kernel_nofault(insn, (bug_insn_t *)pc))
245                 return 0;
246
247         return GET_INSN_LENGTH(insn);
248 }
249
/*
 * Dispatch an ebreak trap, in priority order: kprobes, uprobes, user
 * SIGTRAP, KGDB, then WARN/BUG/CFI handling; anything left is a fatal
 * kernel BUG.
 */
void handle_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
	if (kprobe_single_step_handler(regs))
		return;

	if (kprobe_breakpoint_handler(regs))
		return;
#endif
#ifdef CONFIG_UPROBES
	if (uprobe_single_step_handler(regs))
		return;

	if (uprobe_breakpoint_handler(regs))
		return;
#endif
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	/* Give the debugger a chance to claim kernel-mode breakpoints. */
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	/* WARN()/CFI failures resume past the break insn; BUG() dies. */
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}
281
282 asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
283 {
284         if (user_mode(regs)) {
285                 irqentry_enter_from_user_mode(regs);
286
287                 handle_break(regs);
288
289                 irqentry_exit_to_user_mode(regs);
290         } else {
291                 irqentry_state_t state = irqentry_nmi_enter(regs);
292
293                 handle_break(regs);
294
295                 irqentry_nmi_exit(regs, state);
296         }
297 }
298
/*
 * U-mode ecall trap: the syscall entry point.  An ecall arriving from
 * kernel mode is a bug and is reported as an illegal trap.
 */
asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* Syscall number is passed in a7 by the RISC-V Linux ABI. */
		long syscall = regs->a7;

		/* Resume after the ecall instruction on return to user. */
		regs->epc += 4;
		/* Save a0 so syscall restart can replay the original argument. */
		regs->orig_a0 = regs->a0;

		riscv_v_vstate_discard(regs);

		/* May rewrite the syscall number (ptrace/seccomp). */
		syscall = syscall_enter_from_user_mode(regs, syscall);

		if (syscall >= 0 && syscall < NR_syscalls)
			syscall_handler(regs, syscall);
		else if (syscall != -1)
			regs->a0 = -ENOSYS;
		/* syscall == -1: tracer skipped the call; leave a0 alone. */

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			"Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}

}
327
328 #ifdef CONFIG_MMU
/* Page-fault trap entry: defer all real work to handle_page_fault(). */
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	/*
	 * NOTE(review): presumably handle_page_fault() can return with IRQs
	 * enabled; they must be off before irqentry_exit() — confirm.
	 */
	local_irq_disable();

	irqentry_exit(regs, state);
}
339 #endif
340
341 static void noinstr handle_riscv_irq(struct pt_regs *regs)
342 {
343         struct pt_regs *old_regs;
344
345         irq_enter_rcu();
346         old_regs = set_irq_regs(regs);
347         handle_arch_irq(regs);
348         set_irq_regs(old_regs);
349         irq_exit_rcu();
350 }
351
/*
 * Top-level IRQ trap entry.  With CONFIG_IRQ_STACKS, interrupts taken on
 * the task stack are handled on a dedicated per-CPU IRQ stack.
 */
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);
#ifdef CONFIG_IRQ_STACKS
	if (on_thread_stack()) {
		/* Top of this CPU's IRQ stack (stacks grow down). */
		ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
					+ IRQ_STACK_SIZE/sizeof(ulong);
		/*
		 * Hand-rolled stack switch: save ra/s0, build a frame
		 * pointer, move sp onto the IRQ stack, call
		 * handle_riscv_irq(regs), then unwind back to the task
		 * stack.  The call clobbers all caller-saved registers,
		 * hence the long clobber list; s0 stays live as the frame
		 * pointer when CONFIG_FRAME_POINTER is set.
		 */
		__asm__ __volatile(
		"addi   sp, sp, -"RISCV_SZPTR  "\n"
		REG_S"  ra, (sp)                \n"
		"addi   sp, sp, -"RISCV_SZPTR  "\n"
		REG_S"  s0, (sp)                \n"
		"addi   s0, sp, 2*"RISCV_SZPTR "\n"
		"move   sp, %[sp]               \n"
		"move   a0, %[regs]             \n"
		"call   handle_riscv_irq        \n"
		"addi   sp, s0, -2*"RISCV_SZPTR"\n"
		REG_L"  s0, (sp)                \n"
		"addi   sp, sp, "RISCV_SZPTR   "\n"
		REG_L"  ra, (sp)                \n"
		"addi   sp, sp, "RISCV_SZPTR   "\n"
		:
		: [sp] "r" (sp), [regs] "r" (regs)
		: "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
		  "t0", "t1", "t2", "t3", "t4", "t5", "t6",
#ifndef CONFIG_FRAME_POINTER
		  "s0",
#endif
		  "memory");
	} else
#endif
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}
387
388 #ifdef CONFIG_GENERIC_BUG
389 int is_valid_bugaddr(unsigned long pc)
390 {
391         bug_insn_t insn;
392
393         if (pc < VMALLOC_START)
394                 return 0;
395         if (get_kernel_nofault(insn, (bug_insn_t *)pc))
396                 return 0;
397         if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
398                 return (insn == __BUG_INSN_32);
399         else
400                 return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
401 }
402 #endif /* CONFIG_GENERIC_BUG */
403
404 #ifdef CONFIG_VMAP_STACK
405 /*
406  * Extra stack space that allows us to provide panic messages when the kernel
407  * has overflowed its stack.
408  */
409 static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
410                 overflow_stack)__aligned(16);
411 /*
412  * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
413  * we can call into C code to get the per-hart overflow stack.  Usage of this
414  * stack must be protected by spin_shadow_stack.
415  */
416 long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
417
418 /*
419  * A pseudo spinlock to protect the shadow stack from being used by multiple
420  * harts concurrently.  This isn't a real spinlock because the lock side must
421  * be taken without a valid stack and only a single register, it's only taken
422  * while in the process of panicing anyway so the performance and error
423  * checking a proper spinlock gives us doesn't matter.
424  */
425 unsigned long spin_shadow_stack;
426
427 asmlinkage unsigned long get_overflow_stack(void)
428 {
429         return (unsigned long)this_cpu_ptr(overflow_stack) +
430                 OVERFLOW_STACK_SIZE;
431 }
432
/*
 * Called (on the overflow stack) when a kernel stack overflow has been
 * detected: release the shadow-stack pseudo-lock, dump state, and panic.
 * Never returns.
 */
asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	/*
	 * We're done with the shadow stack by this point, as we're on the
	 * overflow stack.  Tell any other concurrent overflowing harts that
	 * they can proceed with panicing by releasing the pseudo-spinlock.
	 *
	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
	 */
	smp_store_release(&spin_shadow_stack, 0);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
			tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	/* Unreachable backstop in case panic() ever returns. */
	for (;;)
		wait_for_interrupt();
}
461 #endif