1 #include <asm/asm-offsets.h>
3 #ifdef CONFIG_PPC_BOOK3S
4 #include <asm/exception-64s.h>
6 #include <asm/exception-64e.h>
8 #include <asm/feature-fixups.h>
9 #include <asm/head-64.h>
10 #include <asm/hw_irq.h>
13 #include <asm/ppc_asm.h>
14 #include <asm/ptrace.h>
/*
 * TOC entries so asm code can locate the syscall dispatch tables.
 * NOTE(review): the label preceding the first .tc entry (presumably
 * SYS_CALL_TABLE:) is not visible here -- this chunk appears sampled.
 */
18 .tc sys_call_table[TC],sys_call_table
21 COMPAT_SYS_CALL_TABLE:
22 .tc compat_sys_call_table[TC],compat_sys_call_table
/*
 * DEBUG_SRR_VALID srr
 *
 * Under CONFIG_PPC_RFI_SRR_DEBUG, emit WARN_ONCE-style bug entries (via
 * EMIT_BUG_ENTRY, flags BUGFLAG_WARNING | BUGFLAG_ONCE) -- presumably one
 * per SRR0/SRR1 (or HSRR0/HSRR1) consistency check, selected by \srr.
 * NOTE(review): the compare/trap instructions that each "100:" label and
 * bug entry refer to, and the closing .endm, are missing from this view;
 * the source appears sampled -- confirm against the full file before editing.
 */
28 .macro DEBUG_SRR_VALID srr
29 #ifdef CONFIG_PPC_RFI_SRR_DEBUG
34 EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
38 EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
43 EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
47 EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
52 #ifdef CONFIG_PPC_BOOK3S
/*
 * system_call_vectored name trapnr
 *
 * Book3S entry/exit path for the "scv" (system call vectored) instruction.
 * Builds the pt_regs frame, calls system_call_exception(), and exits via
 * syscall_exit_prepare(); the exit is restartable if an interrupt became
 * pending (see the SOFT_MASK_TABLE/RESTART_TABLE entries at the end).
 * Instantiated below for the "common" (0x3000) and "sigill" (0x7ff0)
 * variants.
 * NOTE(review): many interior lines (register save/restore, the rfscv
 * return sequence) are missing from this view -- source appears sampled.
 */
53 .macro system_call_vectored name trapnr
54 .globl system_call_vectored_\name
55 system_call_vectored_\name:
56 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
57 SCV_INTERRUPT_TO_KERNEL
69 /* Can we avoid saving r3-r8 in common case? */
76 /* Zero r9-r12, this should only be required when restoring all GPRs */
/* Drop the "regshere" frame marker just below STACK_FRAME_OVERHEAD. */
90 addi r10,r1,STACK_FRAME_OVERHEAD
91 ld r11,exception_marker@toc(r2)
92 std r11,-16(r10) /* "regshere" marker */
96 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
99 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
100 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
101 * and interrupts may be masked and pending already.
102 * system_call_exception() will call trace_hardirqs_off() which means
103 * interrupts could already have been blocked before trace_hardirqs_off,
104 * but this is the best we can do.
107 /* Calling convention has r9 = orig r0, r10 = regs */
109 bl system_call_exception
/* Exit: r4 = pt_regs for syscall_exit_prepare(). */
111 .Lsyscall_vectored_\name\()_exit:
112 addi r4,r1,STACK_FRAME_OVERHEAD
114 bl syscall_exit_prepare
115 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/*
 * Restartable section: if any interrupt other than HARD_DIS became
 * pending (PACAIRQHAPPENED), branch to the restart path.
 */
116 .Lsyscall_vectored_\name\()_rst_start:
117 lbz r11,PACAIRQHAPPENED(r13)
118 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
119 bne- syscall_vectored_\name\()_restart
121 stb r11,PACAIRQSOFTMASK(r13)
123 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
130 stdcx. r0,0,r1 /* to clear the reservation */
131 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
135 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Non-zero result from syscall_exit_prepare => full GPR restore needed. */
138 bne .Lsyscall_vectored_\name\()_restore_regs
140 /* rfscv returns with LR->NIA and CTR->MSR */
144 /* Could zero these as per ABI, but we may consider a stricter ABI
145 * which preserves these if libc implementations can benefit, so
146 * restore them for now until further measurement is done. */
153 /* Zero volatile regs that may contain sensitive kernel data */
161 * We don't need to restore AMR on the way back to userspace for KUAP.
162 * The value of AMR only matters while we're in the kernel.
170 b . /* prevent speculative execution */
172 .Lsyscall_vectored_\name\()_restore_regs:
190 .Lsyscall_vectored_\name\()_rst_end:
/*
 * Restart path: reload r1 saved in the PACA, re-disable irqs in the
 * soft mask, and retry the exit via syscall_exit_restart().
 */
192 syscall_vectored_\name\()_restart:
193 _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
195 ld r1,PACA_EXIT_SAVE_R1(r13)
198 addi r4,r1,STACK_FRAME_OVERHEAD
199 li r11,IRQS_ALL_DISABLED
200 stb r11,PACAIRQSOFTMASK(r13)
201 bl syscall_exit_restart
202 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
203 b .Lsyscall_vectored_\name\()_rst_start
206 SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
207 RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
/* Normal scv entry, trap number 0x3000. */
211 system_call_vectored common 0x3000
214 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
215 * which is tested by system_call_exception when r0 is -1 (as set by vector
218 system_call_vectored sigill 0x7ff0
222 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
/*
 * system_call_vectored_emulate: entry used when an scv instruction is
 * emulated (set up by kernel/sstep.c, per the comment above). Marks irqs
 * soft-disabled in the PACA, then tail-jumps into the common scv handler.
 */
224 .globl system_call_vectored_emulate
225 system_call_vectored_emulate:
226 _ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
227 li r10,IRQS_ALL_DISABLED
228 stb r10,PACAIRQSOFTMASK(r13)
229 b system_call_vectored_common
230 #endif /* CONFIG_PPC_BOOK3S */
/*
 * system_call_common_real: "sc" entry taken in real mode; loads the
 * kernel MSR from the PACA (presumably to switch to virtual mode before
 * falling into system_call_common -- the following instructions are not
 * visible in this view; source appears sampled).
 */
232 .balign IFETCH_ALIGN_BYTES
233 .globl system_call_common_real
234 system_call_common_real:
235 _ASM_NOKPROBE_SYMBOL(system_call_common_real)
236 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
/*
 * system_call_common: entry/exit path for the classic "sc" system call
 * instruction. Mirrors the scv path above: build the frame, call
 * system_call_exception(), exit through syscall_exit_prepare() with a
 * restartable exit section (SOFT_MASK_TABLE/RESTART_TABLE at the end).
 * NOTE(review): many interior lines (register saves, the rfid return
 * sequence, the restore_regs body) are missing from this view.
 */
239 .balign IFETCH_ALIGN_BYTES
240 .globl system_call_common
242 _ASM_NOKPROBE_SYMBOL(system_call_common)
251 #ifdef CONFIG_PPC_FSL_BOOK3E
252 START_BTB_FLUSH_SECTION
254 END_BTB_FLUSH_SECTION
259 /* Can we avoid saving r3-r8 in common case? */
266 /* Zero r9-r12, this should only be required when restoring all GPRs */
278 * This clears CR0.SO (bit 28), which is the error indication on
279 * return from this system call.
281 rldimi r12,r11,28,(63-28)
/* Drop the "regshere" frame marker just below STACK_FRAME_OVERHEAD. */
286 addi r10,r1,STACK_FRAME_OVERHEAD
287 ld r11,exception_marker@toc(r2)
288 std r11,-16(r10) /* "regshere" marker */
290 #ifdef CONFIG_PPC_BOOK3S
292 stb r11,PACASRR_VALID(r13)
296 * We always enter kernel from userspace with irq soft-mask enabled and
297 * nothing pending. system_call_exception() will call
298 * trace_hardirqs_off().
300 li r11,IRQS_ALL_DISABLED
301 stb r11,PACAIRQSOFTMASK(r13)
302 #ifdef CONFIG_PPC_BOOK3S
303 li r12,-1 /* Set MSR_EE and MSR_RI */
309 /* Calling convention has r9 = orig r0, r10 = regs */
311 bl system_call_exception
/* Exit: r4 = pt_regs for syscall_exit_prepare(). */
314 addi r4,r1,STACK_FRAME_OVERHEAD
316 bl syscall_exit_prepare
317 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
318 #ifdef CONFIG_PPC_BOOK3S
/*
 * Restartable section: if any interrupt other than HARD_DIS became
 * pending (PACAIRQHAPPENED), take the (not-visible-here) branch to
 * syscall_restart; otherwise clear the soft mask and HARD_DIS.
 */
320 lbz r11,PACAIRQHAPPENED(r13)
321 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
325 stb r11,PACAIRQSOFTMASK(r13)
327 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
333 #ifdef CONFIG_PPC_BOOK3S
334 lbz r4,PACASRR_VALID(r13)
338 stb r4,PACASRR_VALID(r13)
348 stdcx. r0,0,r1 /* to clear the reservation */
349 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
/* Non-zero result from syscall_exit_prepare => full GPR restore needed. */
352 bne .Lsyscall_restore_regs
353 /* Zero volatile regs that may contain sensitive kernel data */
366 .Lsyscall_restore_regs_cont:
370 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
373 * We don't need to restore AMR on the way back to userspace for KUAP.
374 * The value of AMR only matters while we're in the kernel.
382 b . /* prevent speculative execution */
384 .Lsyscall_restore_regs:
393 b .Lsyscall_restore_regs_cont
396 #ifdef CONFIG_PPC_BOOK3S
/*
 * Restart path: reload r1 saved in the PACA, re-disable irqs in the
 * soft mask, and retry the exit via syscall_exit_restart().
 */
398 _ASM_NOKPROBE_SYMBOL(syscall_restart)
400 ld r1,PACA_EXIT_SAVE_R1(r13)
403 addi r4,r1,STACK_FRAME_OVERHEAD
404 li r11,IRQS_ALL_DISABLED
405 stb r11,PACAIRQSOFTMASK(r13)
406 bl syscall_exit_restart
407 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
408 b .Lsyscall_rst_start
411 SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
412 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
416 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
417 * touched, no exit work created, then this can be used.
/*
 * fast_interrupt_return_srr: abbreviated interrupt return usable only
 * under the preconditions in the comment above. Checks/restores KUAP
 * (AMR) state, then dispatches to the fast user or kernel return path.
 * If returning to kernel with MSR unrecoverable, calls
 * unrecoverable_exception() and stops.
 * NOTE(review): the compare instructions feeding the bne/bne+ branches
 * are not visible here -- source appears sampled.
 */
419 .balign IFETCH_ALIGN_BYTES
420 .globl fast_interrupt_return_srr
421 fast_interrupt_return_srr:
422 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
423 kuap_check_amr r3, r4
426 #ifdef CONFIG_PPC_BOOK3S
428 kuap_user_restore r3, r4
429 b .Lfast_user_interrupt_return_srr
430 1: kuap_kernel_restore r3, r4
432 li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
433 bne+ .Lfast_kernel_interrupt_return_srr
434 addi r3,r1,STACK_FRAME_OVERHEAD
435 bl unrecoverable_exception
436 b . /* should not get here */
438 bne .Lfast_user_interrupt_return_srr
439 b .Lfast_kernel_interrupt_return_srr
/*
 * interrupt_return_macro srr
 *
 * Generates the full interrupt-return path for one register pair,
 * \srr = "srr" (SRR0/SRR1) or "hsrr" (HSRR0/HSRR1):
 *   interrupt_return_\srr          -- dispatch on user vs kernel return
 *   interrupt_return_\srr_user     -- via interrupt_exit_user_prepare()
 *   interrupt_return_\srr_kernel   -- via interrupt_exit_kernel_prepare()
 * Both exits are restartable sections registered in SOFT_MASK_TABLE /
 * RESTART_TABLE; the kernel path also handles an emulated "stdu" stack
 * store using PACA_EXGEN as scratch.
 * NOTE(review): many interior lines (register restores, the rfid/hrfid
 * sequences, several compares feeding branches) are missing from this
 * view -- source appears sampled; do not edit blindly.
 */
442 .macro interrupt_return_macro srr
443 .balign IFETCH_ALIGN_BYTES
444 .globl interrupt_return_\srr
445 interrupt_return_\srr\():
446 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
449 beq interrupt_return_\srr\()_kernel
/* ---- Return-to-user path ---- */
450 interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
451 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
452 addi r3,r1,STACK_FRAME_OVERHEAD
453 bl interrupt_exit_user_prepare
455 bne- .Lrestore_nvgprs_\srr
456 .Lrestore_nvgprs_\srr\()_cont:
457 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
458 #ifdef CONFIG_PPC_BOOK3S
/*
 * Restartable section: if any interrupt other than HARD_DIS became
 * pending (PACAIRQHAPPENED), branch to the user restart path.
 */
459 .Linterrupt_return_\srr\()_user_rst_start:
460 lbz r11,PACAIRQHAPPENED(r13)
461 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
462 bne- interrupt_return_\srr\()_user_restart
465 stb r11,PACAIRQSOFTMASK(r13)
467 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
469 .Lfast_user_interrupt_return_\srr\():
470 #ifdef CONFIG_PPC_BOOK3S
472 lbz r4,PACASRR_VALID(r13)
474 lbz r4,PACAHSRR_VALID(r13)
486 #ifdef CONFIG_PPC_BOOK3S
487 stb r4,PACASRR_VALID(r13)
493 #ifdef CONFIG_PPC_BOOK3S
494 stb r4,PACAHSRR_VALID(r13)
/* Debug check: soft mask must read IRQS_ENABLED when returning to user. */
499 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
500 lbz r4,PACAIRQSOFTMASK(r13)
501 tdnei r4,IRQS_ENABLED
507 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
510 stdcx. r0,0,r1 /* to clear the reservation */
513 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
539 b . /* prevent speculative execution */
540 .Linterrupt_return_\srr\()_user_rst_end:
542 .Lrestore_nvgprs_\srr\():
544 b .Lrestore_nvgprs_\srr\()_cont
546 #ifdef CONFIG_PPC_BOOK3S
/* User restart: reload saved r1, re-mask irqs, retry via C helper. */
547 interrupt_return_\srr\()_user_restart:
548 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
550 ld r1,PACA_EXIT_SAVE_R1(r13)
552 addi r3,r1,STACK_FRAME_OVERHEAD
553 li r11,IRQS_ALL_DISABLED
554 stb r11,PACAIRQSOFTMASK(r13)
555 bl interrupt_exit_user_restart
556 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
557 b .Linterrupt_return_\srr\()_user_rst_start
560 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
561 RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
/* ---- Return-to-kernel path ---- */
564 .balign IFETCH_ALIGN_BYTES
565 interrupt_return_\srr\()_kernel:
566 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
567 addi r3,r1,STACK_FRAME_OVERHEAD
568 bl interrupt_exit_kernel_prepare
570 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
571 .Linterrupt_return_\srr\()_kernel_rst_start:
573 cmpwi r11,IRQS_ENABLED
574 stb r11,PACAIRQSOFTMASK(r13)
576 #ifdef CONFIG_PPC_BOOK3S
/* As on the user path: pending irqs (other than HARD_DIS) => restart. */
577 lbz r11,PACAIRQHAPPENED(r13)
578 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
579 bne- interrupt_return_\srr\()_kernel_restart
582 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
585 .Lfast_kernel_interrupt_return_\srr\():
587 #ifdef CONFIG_PPC_BOOK3S
589 lbz r4,PACASRR_VALID(r13)
591 lbz r4,PACAHSRR_VALID(r13)
603 #ifdef CONFIG_PPC_BOOK3S
604 stb r4,PACASRR_VALID(r13)
610 #ifdef CONFIG_PPC_BOOK3S
611 stb r4,PACAHSRR_VALID(r13)
617 stdcx. r0,0,r1 /* to clear the reservation */
620 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
636 * Leaving a stale exception_marker on the stack can confuse
637 * the reliable stack unwinder later on. Clear it.
639 std r0,STACK_FRAME_OVERHEAD-16(r1)
643 bne- cr1,1f /* emulate stack store */
653 b . /* prevent speculative execution */
656 * Emulate stack store with update. New r1 value was already calculated
657 * and updated in our interrupt regs by emulate_loadstore, but we can't
658 * store the previous value of r1 to the stack before re-loading our
659 * registers from it, otherwise they could be clobbered. Use
660 * PACA_EXGEN as temporary storage to hold the store data, as
661 * interrupts are disabled here so it won't be clobbered.
664 std r9,PACA_EXGEN+0(r13)
665 addi r9,r1,INT_FRAME_SIZE /* get original r1 */
669 std r9,0(r1) /* perform store component of stdu */
670 ld r9,PACA_EXGEN+0(r13)
677 b . /* prevent speculative execution */
678 .Linterrupt_return_\srr\()_kernel_rst_end:
680 #ifdef CONFIG_PPC_BOOK3S
/* Kernel restart: reload saved r1, re-mask irqs, retry via C helper. */
681 interrupt_return_\srr\()_kernel_restart:
682 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
684 ld r1,PACA_EXIT_SAVE_R1(r13)
686 addi r3,r1,STACK_FRAME_OVERHEAD
687 li r11,IRQS_ALL_DISABLED
688 stb r11,PACAIRQSOFTMASK(r13)
689 bl interrupt_exit_kernel_restart
690 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
691 b .Linterrupt_return_\srr\()_kernel_rst_start
694 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
695 RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
/*
 * Instantiate the return paths: SRR for all, HSRR only on Book3S, which
 * also ends the soft-masked region with the fixed __end_soft_masked symbol.
 */
700 interrupt_return_macro srr
701 #ifdef CONFIG_PPC_BOOK3S
702 interrupt_return_macro hsrr
704 .globl __end_soft_masked
706 DEFINE_FIXED_SYMBOL(__end_soft_masked)
707 #endif /* CONFIG_PPC_BOOK3S */
709 #ifdef CONFIG_PPC_BOOK3S
/*
 * ret_from_fork_scv: child-side return for a fork entered via scv.
 * Returns 0 to the child through the common scv exit path.
 * NOTE(review): intermediate lines (presumably a schedule_tail call) are
 * not visible here -- source appears sampled.
 */
710 _GLOBAL(ret_from_fork_scv)
713 li r3,0 /* fork() return value */
714 b .Lsyscall_vectored_common_exit
717 _GLOBAL(ret_from_fork)
720 li r3,0 /* fork() return value */
723 _GLOBAL(ret_from_kernel_thread)
728 #ifdef PPC64_ELF_ABI_v2