1 // SPDX-License-Identifier: GPL-2.0+
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 1994 - 2000  Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
13 #include <linux/audit.h>
14 #include <linux/cache.h>
15 #include <linux/context_tracking.h>
16 #include <linux/irqflags.h>
17 #include <linux/sched.h>
19 #include <linux/personality.h>
20 #include <linux/smp.h>
21 #include <linux/kernel.h>
22 #include <linux/signal.h>
23 #include <linux/errno.h>
24 #include <linux/wait.h>
25 #include <linux/ptrace.h>
26 #include <linux/unistd.h>
27 #include <linux/compiler.h>
28 #include <linux/syscalls.h>
29 #include <linux/uaccess.h>
32 #include <asm/cacheflush.h>
33 #include <asm/cpu-features.h>
36 #include <asm/ucontext.h>
/* Build with -DDEBUG_SIG to trace signal delivery. */
#ifdef DEBUG_SIG
# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif

/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
/* Make sure we will not lose LBT ownership */
#define lock_lbt_owner()	({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner()	({ pagefault_enable(); preempt_enable(); })
52 /* Assembly functions to move context to/from the FPU */
54 _save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
56 _restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
58 _save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
60 _restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
62 _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
64 _restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
66 #ifdef CONFIG_CPU_HAS_LBT
67 extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
68 extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
69 extern asmlinkage int _save_ftop_context(void __user *ftop);
70 extern asmlinkage int _restore_ftop_context(void __user *ftop);
74 struct siginfo rs_info;
75 struct ucontext rs_uctx;
/* Location and size of one saved extended-context record on the user stack */
struct _ctx_layout {
	struct sctx_info *addr;
	unsigned int size;
};
83 struct extctx_layout {
86 struct _ctx_layout fpu;
87 struct _ctx_layout lsx;
88 struct _ctx_layout lasx;
89 struct _ctx_layout lbt;
90 struct _ctx_layout end;
93 static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
95 return (void __user *)((char *)info + sizeof(struct sctx_info));
/*
 * Thread saved context copy to/from a signal context presumed to be on the
 * user stack, and therefore accessed with appropriate macros from uaccess.h.
 */
102 static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
106 uint64_t __user *regs = (uint64_t *)&ctx->regs;
107 uint64_t __user *fcc = &ctx->fcc;
108 uint32_t __user *fcsr = &ctx->fcsr;
110 for (i = 0; i < NUM_FPU_REGS; i++) {
112 __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0),
115 err |= __put_user(current->thread.fpu.fcc, fcc);
116 err |= __put_user(current->thread.fpu.fcsr, fcsr);
121 static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
126 uint64_t __user *regs = (uint64_t *)&ctx->regs;
127 uint64_t __user *fcc = &ctx->fcc;
128 uint32_t __user *fcsr = &ctx->fcsr;
130 for (i = 0; i < NUM_FPU_REGS; i++) {
131 err |= __get_user(fpr_val, ®s[i]);
132 set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val);
134 err |= __get_user(current->thread.fpu.fcc, fcc);
135 err |= __get_user(current->thread.fpu.fcsr, fcsr);
140 static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
144 uint64_t __user *regs = (uint64_t *)&ctx->regs;
145 uint64_t __user *fcc = &ctx->fcc;
146 uint32_t __user *fcsr = &ctx->fcsr;
148 for (i = 0; i < NUM_FPU_REGS; i++) {
149 err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0),
151 err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 1),
154 err |= __put_user(current->thread.fpu.fcc, fcc);
155 err |= __put_user(current->thread.fpu.fcsr, fcsr);
160 static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
165 uint64_t __user *regs = (uint64_t *)&ctx->regs;
166 uint64_t __user *fcc = &ctx->fcc;
167 uint32_t __user *fcsr = &ctx->fcsr;
169 for (i = 0; i < NUM_FPU_REGS; i++) {
170 err |= __get_user(fpr_val, ®s[2*i]);
171 set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val);
172 err |= __get_user(fpr_val, ®s[2*i+1]);
173 set_fpr64(¤t->thread.fpu.fpr[i], 1, fpr_val);
175 err |= __get_user(current->thread.fpu.fcc, fcc);
176 err |= __get_user(current->thread.fpu.fcsr, fcsr);
181 static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
185 uint64_t __user *regs = (uint64_t *)&ctx->regs;
186 uint64_t __user *fcc = &ctx->fcc;
187 uint32_t __user *fcsr = &ctx->fcsr;
189 for (i = 0; i < NUM_FPU_REGS; i++) {
190 err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0),
192 err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 1),
194 err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 2),
196 err |= __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 3),
199 err |= __put_user(current->thread.fpu.fcc, fcc);
200 err |= __put_user(current->thread.fpu.fcsr, fcsr);
205 static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
210 uint64_t __user *regs = (uint64_t *)&ctx->regs;
211 uint64_t __user *fcc = &ctx->fcc;
212 uint32_t __user *fcsr = &ctx->fcsr;
214 for (i = 0; i < NUM_FPU_REGS; i++) {
215 err |= __get_user(fpr_val, ®s[4*i]);
216 set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val);
217 err |= __get_user(fpr_val, ®s[4*i+1]);
218 set_fpr64(¤t->thread.fpu.fpr[i], 1, fpr_val);
219 err |= __get_user(fpr_val, ®s[4*i+2]);
220 set_fpr64(¤t->thread.fpu.fpr[i], 2, fpr_val);
221 err |= __get_user(fpr_val, ®s[4*i+3]);
222 set_fpr64(¤t->thread.fpu.fpr[i], 3, fpr_val);
224 err |= __get_user(current->thread.fpu.fcc, fcc);
225 err |= __get_user(current->thread.fpu.fcsr, fcsr);
230 #ifdef CONFIG_CPU_HAS_LBT
231 static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
234 uint64_t __user *regs = (uint64_t *)&ctx->regs;
235 uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
237 err |= __put_user(current->thread.lbt.scr0, ®s[0]);
238 err |= __put_user(current->thread.lbt.scr1, ®s[1]);
239 err |= __put_user(current->thread.lbt.scr2, ®s[2]);
240 err |= __put_user(current->thread.lbt.scr3, ®s[3]);
241 err |= __put_user(current->thread.lbt.eflags, eflags);
246 static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
249 uint64_t __user *regs = (uint64_t *)&ctx->regs;
250 uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
252 err |= __get_user(current->thread.lbt.scr0, ®s[0]);
253 err |= __get_user(current->thread.lbt.scr1, ®s[1]);
254 err |= __get_user(current->thread.lbt.scr2, ®s[2]);
255 err |= __get_user(current->thread.lbt.scr3, ®s[3]);
256 err |= __get_user(current->thread.lbt.eflags, eflags);
261 static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
263 uint32_t __user *ftop = &ctx->ftop;
265 return __put_user(current->thread.fpu.ftop, ftop);
268 static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
270 uint32_t __user *ftop = &ctx->ftop;
272 return __get_user(current->thread.fpu.ftop, ftop);
/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
279 static int save_hw_fpu_context(struct fpu_context __user *ctx)
281 uint64_t __user *regs = (uint64_t *)&ctx->regs;
282 uint64_t __user *fcc = &ctx->fcc;
283 uint32_t __user *fcsr = &ctx->fcsr;
285 return _save_fp_context(regs, fcc, fcsr);
288 static int restore_hw_fpu_context(struct fpu_context __user *ctx)
290 uint64_t __user *regs = (uint64_t *)&ctx->regs;
291 uint64_t __user *fcc = &ctx->fcc;
292 uint32_t __user *fcsr = &ctx->fcsr;
294 return _restore_fp_context(regs, fcc, fcsr);
297 static int save_hw_lsx_context(struct lsx_context __user *ctx)
299 uint64_t __user *regs = (uint64_t *)&ctx->regs;
300 uint64_t __user *fcc = &ctx->fcc;
301 uint32_t __user *fcsr = &ctx->fcsr;
303 return _save_lsx_context(regs, fcc, fcsr);
306 static int restore_hw_lsx_context(struct lsx_context __user *ctx)
308 uint64_t __user *regs = (uint64_t *)&ctx->regs;
309 uint64_t __user *fcc = &ctx->fcc;
310 uint32_t __user *fcsr = &ctx->fcsr;
312 return _restore_lsx_context(regs, fcc, fcsr);
315 static int save_hw_lasx_context(struct lasx_context __user *ctx)
317 uint64_t __user *regs = (uint64_t *)&ctx->regs;
318 uint64_t __user *fcc = &ctx->fcc;
319 uint32_t __user *fcsr = &ctx->fcsr;
321 return _save_lasx_context(regs, fcc, fcsr);
324 static int restore_hw_lasx_context(struct lasx_context __user *ctx)
326 uint64_t __user *regs = (uint64_t *)&ctx->regs;
327 uint64_t __user *fcc = &ctx->fcc;
328 uint32_t __user *fcsr = &ctx->fcsr;
330 return _restore_lasx_context(regs, fcc, fcsr);
/*
 * Wrappers for the assembly _{save,restore}_lbt_context functions.
 */
336 #ifdef CONFIG_CPU_HAS_LBT
337 static int save_hw_lbt_context(struct lbt_context __user *ctx)
339 uint64_t __user *regs = (uint64_t *)&ctx->regs;
340 uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
342 return _save_lbt_context(regs, eflags);
345 static int restore_hw_lbt_context(struct lbt_context __user *ctx)
347 uint64_t __user *regs = (uint64_t *)&ctx->regs;
348 uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
350 return _restore_lbt_context(regs, eflags);
353 static int save_hw_ftop_context(struct lbt_context __user *ctx)
355 uint32_t __user *ftop = &ctx->ftop;
357 return _save_ftop_context(ftop);
360 static int restore_hw_ftop_context(struct lbt_context __user *ctx)
362 uint32_t __user *ftop = &ctx->ftop;
364 return _restore_ftop_context(ftop);
368 static int fcsr_pending(unsigned int __user *fcsr)
371 unsigned int csr, enabled;
373 err = __get_user(csr, fcsr);
374 enabled = ((csr & FPU_CSR_ALL_E) << 24);
376 * If the signal handler set some FPU exceptions, clear it and
381 err |= __put_user(csr, fcsr);
390 static int protected_save_fpu_context(struct extctx_layout *extctx)
393 struct sctx_info __user *info = extctx->fpu.addr;
394 struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
395 uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
396 uint64_t __user *fcc = &fpu_ctx->fcc;
397 uint32_t __user *fcsr = &fpu_ctx->fcsr;
402 err = save_hw_fpu_context(fpu_ctx);
404 err = copy_fpu_to_sigcontext(fpu_ctx);
407 err |= __put_user(FPU_CTX_MAGIC, &info->magic);
408 err |= __put_user(extctx->fpu.size, &info->size);
412 /* Touch the FPU context and try again */
413 err = __put_user(0, ®s[0]) |
414 __put_user(0, ®s[31]) |
418 return err; /* really bad sigcontext */
424 static int protected_restore_fpu_context(struct extctx_layout *extctx)
426 int err = 0, sig = 0, tmp __maybe_unused;
427 struct sctx_info __user *info = extctx->fpu.addr;
428 struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
429 uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
430 uint64_t __user *fcc = &fpu_ctx->fcc;
431 uint32_t __user *fcsr = &fpu_ctx->fcsr;
433 err = sig = fcsr_pending(fcsr);
440 err = restore_hw_fpu_context(fpu_ctx);
442 err = copy_fpu_from_sigcontext(fpu_ctx);
447 /* Touch the FPU context and try again */
448 err = __get_user(tmp, ®s[0]) |
449 __get_user(tmp, ®s[31]) |
450 __get_user(tmp, fcc) |
451 __get_user(tmp, fcsr);
453 break; /* really bad sigcontext */
459 static int protected_save_lsx_context(struct extctx_layout *extctx)
462 struct sctx_info __user *info = extctx->lsx.addr;
463 struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
464 uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
465 uint64_t __user *fcc = &lsx_ctx->fcc;
466 uint32_t __user *fcsr = &lsx_ctx->fcsr;
470 if (is_lsx_enabled())
471 err = save_hw_lsx_context(lsx_ctx);
475 err = copy_lsx_to_sigcontext(lsx_ctx);
479 err |= __put_user(LSX_CTX_MAGIC, &info->magic);
480 err |= __put_user(extctx->lsx.size, &info->size);
484 /* Touch the LSX context and try again */
485 err = __put_user(0, ®s[0]) |
486 __put_user(0, ®s[32*2-1]) |
490 return err; /* really bad sigcontext */
496 static int protected_restore_lsx_context(struct extctx_layout *extctx)
498 int err = 0, sig = 0, tmp __maybe_unused;
499 struct sctx_info __user *info = extctx->lsx.addr;
500 struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
501 uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
502 uint64_t __user *fcc = &lsx_ctx->fcc;
503 uint32_t __user *fcsr = &lsx_ctx->fcsr;
505 err = sig = fcsr_pending(fcsr);
511 if (is_lsx_enabled())
512 err = restore_hw_lsx_context(lsx_ctx);
514 err = copy_lsx_from_sigcontext(lsx_ctx);
522 /* Touch the LSX context and try again */
523 err = __get_user(tmp, ®s[0]) |
524 __get_user(tmp, ®s[32*2-1]) |
525 __get_user(tmp, fcc) |
526 __get_user(tmp, fcsr);
528 break; /* really bad sigcontext */
534 static int protected_save_lasx_context(struct extctx_layout *extctx)
537 struct sctx_info __user *info = extctx->lasx.addr;
538 struct lasx_context __user *lasx_ctx =
539 (struct lasx_context *)get_ctx_through_ctxinfo(info);
540 uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
541 uint64_t __user *fcc = &lasx_ctx->fcc;
542 uint32_t __user *fcsr = &lasx_ctx->fcsr;
546 if (is_lasx_enabled())
547 err = save_hw_lasx_context(lasx_ctx);
549 if (is_lsx_enabled())
551 else if (is_fpu_owner())
553 err = copy_lasx_to_sigcontext(lasx_ctx);
557 err |= __put_user(LASX_CTX_MAGIC, &info->magic);
558 err |= __put_user(extctx->lasx.size, &info->size);
562 /* Touch the LASX context and try again */
563 err = __put_user(0, ®s[0]) |
564 __put_user(0, ®s[32*4-1]) |
568 return err; /* really bad sigcontext */
574 static int protected_restore_lasx_context(struct extctx_layout *extctx)
576 int err = 0, sig = 0, tmp __maybe_unused;
577 struct sctx_info __user *info = extctx->lasx.addr;
578 struct lasx_context __user *lasx_ctx =
579 (struct lasx_context *)get_ctx_through_ctxinfo(info);
580 uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
581 uint64_t __user *fcc = &lasx_ctx->fcc;
582 uint32_t __user *fcsr = &lasx_ctx->fcsr;
584 err = sig = fcsr_pending(fcsr);
590 if (is_lasx_enabled())
591 err = restore_hw_lasx_context(lasx_ctx);
593 err = copy_lasx_from_sigcontext(lasx_ctx);
594 if (is_lsx_enabled())
595 restore_lsx(current);
596 else if (is_fpu_owner())
603 /* Touch the LASX context and try again */
604 err = __get_user(tmp, ®s[0]) |
605 __get_user(tmp, ®s[32*4-1]) |
606 __get_user(tmp, fcc) |
607 __get_user(tmp, fcsr);
609 break; /* really bad sigcontext */
615 #ifdef CONFIG_CPU_HAS_LBT
616 static int protected_save_lbt_context(struct extctx_layout *extctx)
619 struct sctx_info __user *info = extctx->lbt.addr;
620 struct lbt_context __user *lbt_ctx =
621 (struct lbt_context *)get_ctx_through_ctxinfo(info);
622 uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
623 uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
628 err |= save_hw_lbt_context(lbt_ctx);
630 err |= copy_lbt_to_sigcontext(lbt_ctx);
632 err |= save_hw_ftop_context(lbt_ctx);
634 err |= copy_ftop_to_sigcontext(lbt_ctx);
637 err |= __put_user(LBT_CTX_MAGIC, &info->magic);
638 err |= __put_user(extctx->lbt.size, &info->size);
642 /* Touch the LBT context and try again */
643 err = __put_user(0, ®s[0]) | __put_user(0, eflags);
652 static int protected_restore_lbt_context(struct extctx_layout *extctx)
654 int err = 0, tmp __maybe_unused;
655 struct sctx_info __user *info = extctx->lbt.addr;
656 struct lbt_context __user *lbt_ctx =
657 (struct lbt_context *)get_ctx_through_ctxinfo(info);
658 uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
659 uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
664 err |= restore_hw_lbt_context(lbt_ctx);
666 err |= copy_lbt_from_sigcontext(lbt_ctx);
668 err |= restore_hw_ftop_context(lbt_ctx);
670 err |= copy_ftop_from_sigcontext(lbt_ctx);
675 /* Touch the LBT context and try again */
676 err = __get_user(tmp, ®s[0]) | __get_user(tmp, eflags);
686 static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
687 struct extctx_layout *extctx)
690 struct sctx_info __user *info;
692 err |= __put_user(regs->csr_era, &sc->sc_pc);
693 err |= __put_user(extctx->flags, &sc->sc_flags);
695 err |= __put_user(0, &sc->sc_regs[0]);
696 for (i = 1; i < 32; i++)
697 err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
699 if (extctx->lasx.addr)
700 err |= protected_save_lasx_context(extctx);
701 else if (extctx->lsx.addr)
702 err |= protected_save_lsx_context(extctx);
703 else if (extctx->fpu.addr)
704 err |= protected_save_fpu_context(extctx);
706 #ifdef CONFIG_CPU_HAS_LBT
707 if (extctx->lbt.addr)
708 err |= protected_save_lbt_context(extctx);
711 /* Set the "end" magic */
712 info = (struct sctx_info *)extctx->end.addr;
713 err |= __put_user(0, &info->magic);
714 err |= __put_user(0, &info->size);
719 static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
722 unsigned int magic, size;
723 struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
726 err |= __get_user(magic, &info->magic);
727 err |= __get_user(size, &info->size);
736 if (size < (sizeof(struct sctx_info) +
737 sizeof(struct fpu_context)))
739 extctx->fpu.addr = info;
743 if (size < (sizeof(struct sctx_info) +
744 sizeof(struct lsx_context)))
746 extctx->lsx.addr = info;
750 if (size < (sizeof(struct sctx_info) +
751 sizeof(struct lasx_context)))
753 extctx->lasx.addr = info;
757 if (size < (sizeof(struct sctx_info) +
758 sizeof(struct lbt_context)))
760 extctx->lbt.addr = info;
767 info = (struct sctx_info *)((char *)info + size);
777 static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
780 struct extctx_layout extctx;
782 memset(&extctx, 0, sizeof(struct extctx_layout));
784 err = __get_user(extctx.flags, &sc->sc_flags);
788 err = parse_extcontext(sc, &extctx);
792 conditional_used_math(extctx.flags & SC_USED_FP);
795 * The signal handler may have used FPU; give it up if the program
796 * doesn't want it following sigreturn.
798 if (!(extctx.flags & SC_USED_FP))
801 /* Always make any pending restarted system calls return -EINTR */
802 current->restart_block.fn = do_no_restart_syscall;
804 err |= __get_user(regs->csr_era, &sc->sc_pc);
805 for (i = 1; i < 32; i++)
806 err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
808 if (extctx.lasx.addr)
809 err |= protected_restore_lasx_context(&extctx);
810 else if (extctx.lsx.addr)
811 err |= protected_restore_lsx_context(&extctx);
812 else if (extctx.fpu.addr)
813 err |= protected_restore_fpu_context(&extctx);
815 #ifdef CONFIG_CPU_HAS_LBT
817 err |= protected_restore_lbt_context(&extctx);
824 static unsigned int handle_flags(void)
826 unsigned int flags = 0;
828 flags = used_math() ? SC_USED_FP : 0;
830 switch (current->thread.error_code) {
832 flags |= SC_ADDRERR_RD;
835 flags |= SC_ADDRERR_WR;
842 static unsigned long extframe_alloc(struct extctx_layout *extctx,
843 struct _ctx_layout *layout,
844 size_t size, unsigned int align, unsigned long base)
846 unsigned long new_base = base - size;
848 new_base = round_down(new_base, (align < 16 ? 16 : align));
849 new_base -= sizeof(struct sctx_info);
851 layout->addr = (void *)new_base;
852 layout->size = (unsigned int)(base - new_base);
853 extctx->size += layout->size;
858 static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
860 unsigned long new_sp = sp;
862 memset(extctx, 0, sizeof(struct extctx_layout));
864 extctx->flags = handle_flags();
866 /* Grow down, alloc "end" context info first. */
867 new_sp -= sizeof(struct sctx_info);
868 extctx->end.addr = (void *)new_sp;
869 extctx->end.size = (unsigned int)sizeof(struct sctx_info);
870 extctx->size += extctx->end.size;
872 if (extctx->flags & SC_USED_FP) {
873 if (cpu_has_lasx && thread_lasx_context_live())
874 new_sp = extframe_alloc(extctx, &extctx->lasx,
875 sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
876 else if (cpu_has_lsx && thread_lsx_context_live())
877 new_sp = extframe_alloc(extctx, &extctx->lsx,
878 sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
879 else if (cpu_has_fpu)
880 new_sp = extframe_alloc(extctx, &extctx->fpu,
881 sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
884 #ifdef CONFIG_CPU_HAS_LBT
885 if (cpu_has_lbt && thread_lbt_context_live()) {
886 new_sp = extframe_alloc(extctx, &extctx->lbt,
887 sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
894 void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
895 struct extctx_layout *extctx)
899 /* Default to using normal stack */
903 * If we are on the alternate signal stack and would overflow it, don't.
904 * Return an always-bogus address instead so we will die with SIGSEGV.
906 if (on_sig_stack(sp) &&
907 !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
908 return (void __user __force *)(-1UL);
910 sp = sigsp(sp, ksig);
911 sp = round_down(sp, 16);
912 sp = setup_extcontext(extctx, sp);
913 sp -= sizeof(struct rt_sigframe);
915 if (!IS_ALIGNED(sp, 16))
918 return (void __user *)sp;
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
925 asmlinkage long sys_rt_sigreturn(void)
929 struct pt_regs *regs;
930 struct rt_sigframe __user *frame;
932 regs = current_pt_regs();
933 frame = (struct rt_sigframe __user *)regs->regs[3];
934 if (!access_ok(frame, sizeof(*frame)))
936 if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
939 set_current_blocked(&set);
941 sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
947 regs->regs[0] = 0; /* No syscall restarting */
948 if (restore_altstack(&frame->rs_uctx.uc_stack))
951 return regs->regs[4];
958 static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
959 struct pt_regs *regs, sigset_t *set)
962 struct extctx_layout extctx;
963 struct rt_sigframe __user *frame;
965 frame = get_sigframe(ksig, regs, &extctx);
966 if (!access_ok(frame, sizeof(*frame) + extctx.size))
969 /* Create siginfo. */
970 err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
972 /* Create the ucontext. */
973 err |= __put_user(0, &frame->rs_uctx.uc_flags);
974 err |= __put_user(NULL, &frame->rs_uctx.uc_link);
975 err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
976 err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
977 err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));
983 * Arguments to signal handler:
986 * a1 = pointer to siginfo
987 * a2 = pointer to ucontext
989 * c0_era point to the signal handler, $r3 (sp) points to
990 * the struct rt_sigframe.
992 regs->regs[4] = ksig->sig;
993 regs->regs[5] = (unsigned long) &frame->rs_info;
994 regs->regs[6] = (unsigned long) &frame->rs_uctx;
995 regs->regs[3] = (unsigned long) frame;
996 regs->regs[1] = (unsigned long) sig_return;
997 regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;
999 DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
1000 current->comm, current->pid,
1001 frame, regs->csr_era, regs->regs[1]);
1006 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1009 sigset_t *oldset = sigmask_to_save();
1010 void *vdso = current->mm->context.vdso;
1012 /* Are we from a system call? */
1013 if (regs->regs[0]) {
1014 switch (regs->regs[4]) {
1015 case -ERESTART_RESTARTBLOCK:
1016 case -ERESTARTNOHAND:
1017 regs->regs[4] = -EINTR;
1020 if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
1021 regs->regs[4] = -EINTR;
1025 case -ERESTARTNOINTR:
1026 regs->regs[4] = regs->orig_a0;
1030 regs->regs[0] = 0; /* Don't deal with this again. */
1033 rseq_signal_deliver(ksig, regs);
1035 ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
1037 signal_setup_done(ret, ksig, 0);
1040 void arch_do_signal_or_restart(struct pt_regs *regs)
1042 struct ksignal ksig;
1044 if (get_signal(&ksig)) {
1045 /* Whee! Actually deliver the signal. */
1046 handle_signal(&ksig, regs);
1050 /* Are we from a system call? */
1051 if (regs->regs[0]) {
1052 switch (regs->regs[4]) {
1053 case -ERESTARTNOHAND:
1055 case -ERESTARTNOINTR:
1056 regs->regs[4] = regs->orig_a0;
1060 case -ERESTART_RESTARTBLOCK:
1061 regs->regs[4] = regs->orig_a0;
1062 regs->regs[11] = __NR_restart_syscall;
1066 regs->regs[0] = 0; /* Don't deal with this again. */
1070 * If there's no signal to deliver, we just put the saved sigmask
1073 restore_saved_sigmask();