#include <asm/asm-offsets.h>
#include <asm/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/head-64.h>
#include <asm/hw_irq.h>
#include <asm/kup.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>

        .section        ".toc","aw"
SYS_CALL_TABLE:
        .tc sys_call_table[TC],sys_call_table

#ifdef CONFIG_COMPAT
COMPAT_SYS_CALL_TABLE:
        .tc compat_sys_call_table[TC],compat_sys_call_table
#endif
        .previous

        .align 7

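/*
 * Sanity check for CONFIG_PPC_RFI_SRR_DEBUG: warn (once) if SRR0/SRR1
 * (or HSRR0/HSRR1) no longer match the NIP/MSR values saved in the
 * interrupt frame, i.e. the PACA(H)SRR_VALID tracking went wrong.
 */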
.macro DEBUG_SRR_VALID srr
#ifdef CONFIG_PPC_RFI_SRR_DEBUG
        .ifc \srr,srr
        mfspr   r11,SPRN_SRR0
        ld      r12,_NIP(r1)
100:    tdne    r11,r12
        EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
        mfspr   r11,SPRN_SRR1
        ld      r12,_MSR(r1)
100:    tdne    r11,r12
        EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
        .else
        mfspr   r11,SPRN_HSRR0
        ld      r12,_NIP(r1)
100:    tdne    r11,r12
        EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
        mfspr   r11,SPRN_HSRR1
        ld      r12,_MSR(r1)
100:    tdne    r11,r12
        EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
        .endif
#endif
.endm

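/*
 * System call entry for the scv (vectored) instruction, Book3S only.
 * The vector has already switched to kernel context; this macro builds
 * the pt_regs frame on the kernel stack. Unlike sc, scv leaves MSR[EE]
 * untouched, so the soft-mask state set up by the vector is what masks
 * interrupts until the C handler runs (see the comment further down).
 */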
#ifdef CONFIG_PPC_BOOK3S
.macro system_call_vectored name trapnr
        .globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
        SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
        std     r0,GPR0(r1)
        std     r10,GPR1(r1)
        std     r2,GPR2(r1)
        ld      r2,PACATOC(r13)
        mfcr    r12
        li      r11,0
        /* Can we avoid saving r3-r8 in common case? */
        std     r3,GPR3(r1)
        std     r4,GPR4(r1)
        std     r5,GPR5(r1)
        std     r6,GPR6(r1)
        std     r7,GPR7(r1)
        std     r8,GPR8(r1)
        /* Zero r9-r12, this should only be required when restoring all GPRs */
        std     r11,GPR9(r1)
        std     r11,GPR10(r1)
        std     r11,GPR11(r1)
        std     r11,GPR12(r1)
        std     r9,GPR13(r1)
        SAVE_NVGPRS(r1)
        std     r11,_XER(r1)
        std     r11,_LINK(r1)
        std     r11,_CTR(r1)

        li      r11,\trapnr
        std     r11,_TRAP(r1)
        std     r12,_CCR(r1)
        addi    r10,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r10)            /* "regshere" marker */
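        /*
         * The exception_marker ("regshere") lets the stack unwinder
         * recognise this frame as an interrupt frame and decode the
         * pt_regs stored above it.
         */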

BEGIN_FTR_SECTION
        HMT_MEDIUM
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        /*
         * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
         * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
         * and interrupts may be masked and pending already.
         * system_call_exception() will call trace_hardirqs_off(), so the
         * tracer may see interrupts disabled later than they actually were,
         * but this is the best we can do.
         */

        /* Calling convention has r9 = orig r0, r10 = regs */
        mr      r9,r0
        bl      system_call_exception

.Lsyscall_vectored_\name\()_exit:
        addi    r4,r1,STACK_FRAME_OVERHEAD
        li      r5,1 /* scv */
        bl      syscall_exit_prepare
        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
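        /*
         * Between the rst_start and rst_end labels the exit is restartable:
         * the SOFT_MASK_TABLE entry below makes this window behave as
         * soft-masked, and if a (masked) interrupt does hit here, the
         * RESTART_TABLE entry redirects the return to the _restart code,
         * which disables irqs again and retries the exit.
         */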
.Lsyscall_vectored_\name\()_rst_start:
        lbz     r11,PACAIRQHAPPENED(r13)
        andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
        bne-    syscall_vectored_\name\()_restart
        li      r11,IRQS_ENABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        li      r11,0
        stb     r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

        ld      r2,_CCR(r1)
        ld      r4,_NIP(r1)
        ld      r5,_MSR(r1)

BEGIN_FTR_SECTION
        stdcx.  r0,0,r1                 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
        HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        cmpdi   r3,0
        bne     .Lsyscall_vectored_\name\()_restore_regs

        /* rfscv returns with LR->NIA and CTR->MSR */
        mtlr    r4
        mtctr   r5

        /*
         * Could zero these as per ABI, but we may consider a stricter ABI
         * which preserves these if libc implementations can benefit, so
         * restore them for now until further measurement is done.
         */
        ld      r0,GPR0(r1)
        ld      r4,GPR4(r1)
        ld      r5,GPR5(r1)
        ld      r6,GPR6(r1)
        ld      r7,GPR7(r1)
        ld      r8,GPR8(r1)
        /* Zero volatile regs that may contain sensitive kernel data */
        li      r9,0
        li      r10,0
        li      r11,0
        li      r12,0
        mtspr   SPRN_XER,r0

        /*
         * We don't need to restore AMR on the way back to userspace for KUAP.
         * The value of AMR only matters while we're in the kernel.
         */
        mtcr    r2
        ld      r2,GPR2(r1)
        ld      r3,GPR3(r1)
        ld      r13,GPR13(r1)
        ld      r1,GPR1(r1)
        RFSCV_TO_USER
        b       .       /* prevent speculative execution */

.Lsyscall_vectored_\name\()_restore_regs:
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r5

        ld      r3,_CTR(r1)
        ld      r4,_LINK(r1)
        ld      r5,_XER(r1)

        REST_NVGPRS(r1)
        ld      r0,GPR0(r1)
        mtcr    r2
        mtctr   r3
        mtlr    r4
        mtspr   SPRN_XER,r5
        REST_10GPRS(2, r1)
        REST_2GPRS(12, r1)
        ld      r1,GPR1(r1)
        RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:

syscall_vectored_\name\()_restart:
_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
        GET_PACA(r13)
        ld      r1,PACA_EXIT_SAVE_R1(r13)
        ld      r2,PACATOC(r13)
        ld      r3,RESULT(r1)
        addi    r4,r1,STACK_FRAME_OVERHEAD
        li      r11,IRQS_ALL_DISABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        bl      syscall_exit_restart
        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
        b       .Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm

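/*
 * scv 0 enters at vector 0x3000, which also serves as the trap number
 * recorded in the frame.
 */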
system_call_vectored common 0x3000

/*
 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
 * which is tested by system_call_exception when r0 is -1 (as set by vector
 * entry code).
 */
system_call_vectored sigill 0x7ff0


/*
 * Entered via kernel return set up by kernel/sstep.c, must match entry regs
 */
        .globl system_call_vectored_emulate
system_call_vectored_emulate:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
        li      r10,IRQS_ALL_DISABLED
        stb     r10,PACAIRQSOFTMASK(r13)
        b       system_call_vectored_common
#endif /* CONFIG_PPC_BOOK3S */

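/*
 * Entry for sc when the interrupt arrives with relocation off (presumably
 * the real-mode entry path); switch to the kernel MSR, which turns the MMU
 * back on, then fall through to the common handler.
 */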
        .balign IFETCH_ALIGN_BYTES
        .globl system_call_common_real
system_call_common_real:
_ASM_NOKPROBE_SYMBOL(system_call_common_real)
        ld      r10,PACAKMSR(r13)       /* get MSR value for kernel */
        mtmsrd  r10

        .balign IFETCH_ALIGN_BYTES
        .globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
        std     r0,GPR0(r1)
        std     r10,GPR1(r1)
        std     r2,GPR2(r1)
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
        BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
        ld      r2,PACATOC(r13)
        mfcr    r12
        li      r11,0
        /* Can we avoid saving r3-r8 in common case? */
        std     r3,GPR3(r1)
        std     r4,GPR4(r1)
        std     r5,GPR5(r1)
        std     r6,GPR6(r1)
        std     r7,GPR7(r1)
        std     r8,GPR8(r1)
        /* Zero r9-r12, this should only be required when restoring all GPRs */
        std     r11,GPR9(r1)
        std     r11,GPR10(r1)
        std     r11,GPR11(r1)
        std     r11,GPR12(r1)
        std     r9,GPR13(r1)
        SAVE_NVGPRS(r1)
        std     r11,_XER(r1)
        std     r11,_CTR(r1)
        mflr    r10

        /*
         * This clears CR0.SO (bit 28), which is the error indication on
         * return from this system call.
         */
        rldimi  r12,r11,28,(63-28)
        li      r11,0xc00
        std     r10,_LINK(r1)
        std     r11,_TRAP(r1)
        std     r12,_CCR(r1)
        addi    r10,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r10)            /* "regshere" marker */

#ifdef CONFIG_PPC_BOOK3S
        li      r11,1
        stb     r11,PACASRR_VALID(r13)
#endif

        /*
         * We always enter kernel from userspace with irq soft-mask enabled and
         * nothing pending. system_call_exception() will call
         * trace_hardirqs_off().
         */
        li      r11,IRQS_ALL_DISABLED
        stb     r11,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_PPC_BOOK3S
        li      r12,-1 /* Set MSR_EE and MSR_RI */
        mtmsrd  r12,1
#else
        wrteei  1
#endif
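        /*
         * Note the mtmsrd above uses the L=1 form, which updates only
         * MSR[EE] and MSR[RI]; writing -1 therefore enables both without
         * touching other MSR bits.
         */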

        /* Calling convention has r9 = orig r0, r10 = regs */
        mr      r9,r0
        bl      system_call_exception

.Lsyscall_exit:
        addi    r4,r1,STACK_FRAME_OVERHEAD
        li      r5,0 /* !scv */
        bl      syscall_exit_prepare
        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Lsyscall_rst_start:
        lbz     r11,PACAIRQHAPPENED(r13)
        andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
        bne-    syscall_restart
#endif
        li      r11,IRQS_ENABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        li      r11,0
        stb     r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

        ld      r2,_CCR(r1)
        ld      r6,_LINK(r1)
        mtlr    r6

#ifdef CONFIG_PPC_BOOK3S
        lbz     r4,PACASRR_VALID(r13)
        cmpdi   r4,0
        bne     1f
        li      r4,0
        stb     r4,PACASRR_VALID(r13)
#endif
        ld      r4,_NIP(r1)
        ld      r5,_MSR(r1)
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r5
1:
        DEBUG_SRR_VALID srr

BEGIN_FTR_SECTION
        stdcx.  r0,0,r1                 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

        cmpdi   r3,0
        bne     .Lsyscall_restore_regs
        /* Zero volatile regs that may contain sensitive kernel data */
        li      r0,0
        li      r4,0
        li      r5,0
        li      r6,0
        li      r7,0
        li      r8,0
        li      r9,0
        li      r10,0
        li      r11,0
        li      r12,0
        mtctr   r0
        mtspr   SPRN_XER,r0
.Lsyscall_restore_regs_cont:

BEGIN_FTR_SECTION
        HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

        /*
         * We don't need to restore AMR on the way back to userspace for KUAP.
         * The value of AMR only matters while we're in the kernel.
         */
        mtcr    r2
        ld      r2,GPR2(r1)
        ld      r3,GPR3(r1)
        ld      r13,GPR13(r1)
        ld      r1,GPR1(r1)
        RFI_TO_USER
        b       .       /* prevent speculative execution */

.Lsyscall_restore_regs:
        ld      r3,_CTR(r1)
        ld      r4,_XER(r1)
        REST_NVGPRS(r1)
        mtctr   r3
        mtspr   SPRN_XER,r4
        ld      r0,GPR0(r1)
        REST_8GPRS(4, r1)
        ld      r12,GPR12(r1)
        b       .Lsyscall_restore_regs_cont
.Lsyscall_rst_end:

#ifdef CONFIG_PPC_BOOK3S
syscall_restart:
_ASM_NOKPROBE_SYMBOL(syscall_restart)
        GET_PACA(r13)
        ld      r1,PACA_EXIT_SAVE_R1(r13)
        ld      r2,PACATOC(r13)
        ld      r3,RESULT(r1)
        addi    r4,r1,STACK_FRAME_OVERHEAD
        li      r11,IRQS_ALL_DISABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        bl      syscall_exit_restart
        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
        b       .Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

        /*
         * This entry can be used only if MSR EE/RI was never enabled, IRQs
         * were not reconciled, NVGPRs were not touched, and no exit work
         * was created.
         */
        .balign IFETCH_ALIGN_BYTES
        .globl fast_interrupt_return_srr
fast_interrupt_return_srr:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
        kuap_check_amr r3, r4
        ld      r5,_MSR(r1)
        andi.   r0,r5,MSR_PR
#ifdef CONFIG_PPC_BOOK3S
        beq     1f
        kuap_user_restore r3, r4
        b       .Lfast_user_interrupt_return_srr
1:      kuap_kernel_restore r3, r4
        andi.   r0,r5,MSR_RI
        li      r3,0 /* 0 return value, no EMULATE_STACK_STORE */
        bne+    .Lfast_kernel_interrupt_return_srr
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      unrecoverable_exception
        b       . /* should not get here */
#else
        bne     .Lfast_user_interrupt_return_srr
        b       .Lfast_kernel_interrupt_return_srr
#endif

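/*
 * Common interrupt return, instantiated below once for SRR and, on Book3S,
 * once for HSRR-flavoured interrupts. It splits into _user and _kernel
 * paths depending on MSR[PR] at the time of the interrupt.
 */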
.macro interrupt_return_macro srr
        .balign IFETCH_ALIGN_BYTES
        .globl interrupt_return_\srr
interrupt_return_\srr\():
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
        ld      r4,_MSR(r1)
        andi.   r0,r4,MSR_PR
        beq     interrupt_return_\srr\()_kernel
interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      interrupt_exit_user_prepare
        cmpdi   r3,0
        bne-    .Lrestore_nvgprs_\srr
.Lrestore_nvgprs_\srr\()_cont:
        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
#ifdef CONFIG_PPC_BOOK3S
.Linterrupt_return_\srr\()_user_rst_start:
        lbz     r11,PACAIRQHAPPENED(r13)
        andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
        bne-    interrupt_return_\srr\()_user_restart
#endif
        li      r11,IRQS_ENABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        li      r11,0
        stb     r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS

.Lfast_user_interrupt_return_\srr\():
#ifdef CONFIG_PPC_BOOK3S
        .ifc \srr,srr
        lbz     r4,PACASRR_VALID(r13)
        .else
        lbz     r4,PACAHSRR_VALID(r13)
        .endif
        cmpdi   r4,0
        li      r4,0
        bne     1f
#endif
        ld      r11,_NIP(r1)
        ld      r12,_MSR(r1)
        .ifc \srr,srr
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
        stb     r4,PACASRR_VALID(r13)
#endif
        .else
        mtspr   SPRN_HSRR0,r11
        mtspr   SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
        stb     r4,PACAHSRR_VALID(r13)
#endif
        .endif
        DEBUG_SRR_VALID \srr

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
        lbz     r4,PACAIRQSOFTMASK(r13)
        tdnei   r4,IRQS_ENABLED
#endif

BEGIN_FTR_SECTION
        ld      r10,_PPR(r1)
        mtspr   SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

BEGIN_FTR_SECTION
        stdcx.  r0,0,r1         /* to clear the reservation */
FTR_SECTION_ELSE
        ldarx   r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

        ld      r3,_CCR(r1)
        ld      r4,_LINK(r1)
        ld      r5,_CTR(r1)
        ld      r6,_XER(r1)
        li      r0,0

        REST_4GPRS(7, r1)
        REST_2GPRS(11, r1)
        REST_GPR(13, r1)

        mtcr    r3
        mtlr    r4
        mtctr   r5
        mtspr   SPRN_XER,r6

        REST_4GPRS(2, r1)
        REST_GPR(6, r1)
        REST_GPR(0, r1)
        REST_GPR(1, r1)
        .ifc \srr,srr
        RFI_TO_USER
        .else
        HRFI_TO_USER
        .endif
        b       .       /* prevent speculative execution */
.Linterrupt_return_\srr\()_user_rst_end:

.Lrestore_nvgprs_\srr\():
        REST_NVGPRS(r1)
        b       .Lrestore_nvgprs_\srr\()_cont

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_user_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
        GET_PACA(r13)
        ld      r1,PACA_EXIT_SAVE_R1(r13)
        ld      r2,PACATOC(r13)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        li      r11,IRQS_ALL_DISABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        bl      interrupt_exit_user_restart
        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
        b       .Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

        .balign IFETCH_ALIGN_BYTES
interrupt_return_\srr\()_kernel:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      interrupt_exit_kernel_prepare

        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
.Linterrupt_return_\srr\()_kernel_rst_start:
        ld      r11,SOFTE(r1)
        cmpwi   r11,IRQS_ENABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        bne     1f
#ifdef CONFIG_PPC_BOOK3S
        lbz     r11,PACAIRQHAPPENED(r13)
        andi.   r11,r11,(~PACA_IRQ_HARD_DIS)@l
        bne-    interrupt_return_\srr\()_kernel_restart
#endif
        li      r11,0
        stb     r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
1:

.Lfast_kernel_interrupt_return_\srr\():
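        /*
         * interrupt_exit_kernel_prepare returns nonzero in r3 when a stack
         * store must be emulated (EMULATE_STACK_STORE); remember that in
         * cr1 for the branch to the emulation code at 1: below.
         */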
        cmpdi   cr1,r3,0
#ifdef CONFIG_PPC_BOOK3S
        .ifc \srr,srr
        lbz     r4,PACASRR_VALID(r13)
        .else
        lbz     r4,PACAHSRR_VALID(r13)
        .endif
        cmpdi   r4,0
        li      r4,0
        bne     1f
#endif
        ld      r11,_NIP(r1)
        ld      r12,_MSR(r1)
        .ifc \srr,srr
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
        stb     r4,PACASRR_VALID(r13)
#endif
        .else
        mtspr   SPRN_HSRR0,r11
        mtspr   SPRN_HSRR1,r12
1:
#ifdef CONFIG_PPC_BOOK3S
        stb     r4,PACAHSRR_VALID(r13)
#endif
        .endif
        DEBUG_SRR_VALID \srr

BEGIN_FTR_SECTION
        stdcx.  r0,0,r1         /* to clear the reservation */
FTR_SECTION_ELSE
        ldarx   r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

        ld      r3,_LINK(r1)
        ld      r4,_CTR(r1)
        ld      r5,_XER(r1)
        ld      r6,_CCR(r1)
        li      r0,0

        REST_4GPRS(7, r1)
        REST_2GPRS(11, r1)

        mtlr    r3
        mtctr   r4
        mtspr   SPRN_XER,r5

        /*
         * Leaving a stale exception_marker on the stack can confuse
         * the reliable stack unwinder later on. Clear it.
         */
        std     r0,STACK_FRAME_OVERHEAD-16(r1)

        REST_4GPRS(2, r1)

        bne-    cr1,1f /* emulate stack store */
        mtcr    r6
        REST_GPR(6, r1)
        REST_GPR(0, r1)
        REST_GPR(1, r1)
        .ifc \srr,srr
        RFI_TO_KERNEL
        .else
        HRFI_TO_KERNEL
        .endif
        b       .       /* prevent speculative execution */

1:      /*
         * Emulate stack store with update. New r1 value was already calculated
         * and updated in our interrupt regs by emulate_loadstore, but we can't
         * store the previous value of r1 to the stack before re-loading our
         * registers from it, otherwise they could be clobbered.  Use
         * PACA_EXGEN as temporary storage to hold the store data, as
         * interrupts are disabled here so it won't be clobbered.
         */
        mtcr    r6
        std     r9,PACA_EXGEN+0(r13)
        addi    r9,r1,INT_FRAME_SIZE /* get original r1 */
        REST_GPR(6, r1)
        REST_GPR(0, r1)
        REST_GPR(1, r1)
        std     r9,0(r1) /* perform store component of stdu */
        ld      r9,PACA_EXGEN+0(r13)

        .ifc \srr,srr
        RFI_TO_KERNEL
        .else
        HRFI_TO_KERNEL
        .endif
        b       .       /* prevent speculative execution */
.Linterrupt_return_\srr\()_kernel_rst_end:

#ifdef CONFIG_PPC_BOOK3S
interrupt_return_\srr\()_kernel_restart:
_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
        GET_PACA(r13)
        ld      r1,PACA_EXIT_SAVE_R1(r13)
        ld      r2,PACATOC(r13)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        li      r11,IRQS_ALL_DISABLED
        stb     r11,PACAIRQSOFTMASK(r13)
        bl      interrupt_exit_kernel_restart
        std     r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
        b       .Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

.endm

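/* Instantiate the exit path for SRR everywhere, and for HSRR on Book3S. */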
interrupt_return_macro srr
#ifdef CONFIG_PPC_BOOK3S
interrupt_return_macro hsrr

        .globl __end_soft_masked
__end_soft_masked:
DEFINE_FIXED_SYMBOL(__end_soft_masked)
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3S
_GLOBAL(ret_from_fork_scv)
        bl      schedule_tail
        REST_NVGPRS(r1)
        li      r3,0    /* fork() return value */
        b       .Lsyscall_vectored_common_exit
#endif

_GLOBAL(ret_from_fork)
        bl      schedule_tail
        REST_NVGPRS(r1)
        li      r3,0    /* fork() return value */
        b       .Lsyscall_exit

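/*
 * Kernel threads are switched in here: copy_thread() stashes the thread
 * function in r14 and its argument in r15 (hence the mtctr/mr below), and
 * the ELFv2 ABI needs the entry address in r12 so the callee can compute
 * its TOC pointer.
 */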
_GLOBAL(ret_from_kernel_thread)
        bl      schedule_tail
        REST_NVGPRS(r1)
        mtctr   r14
        mr      r3,r15
#ifdef PPC64_ELF_ABI_v2
        mr      r12,r14
#endif
        bctrl
        li      r3,0
        b       .Lsyscall_exit