2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1992 Ross Biro
7 * Copyright (C) Linus Torvalds
8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
9 * Copyright (C) 1996 David S. Miller
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 1999 MIPS Technologies, Inc.
12 * Copyright (C) 2000 Ulf Carlsson
14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit processes.
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/elf.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/sched/task_stack.h>
24 #include <linux/errno.h>
25 #include <linux/ptrace.h>
26 #include <linux/regset.h>
27 #include <linux/smp.h>
28 #include <linux/security.h>
29 #include <linux/stddef.h>
30 #include <linux/audit.h>
31 #include <linux/seccomp.h>
32 #include <linux/ftrace.h>
34 #include <asm/byteorder.h>
36 #include <asm/cpu-info.h>
39 #include <asm/mipsregs.h>
40 #include <asm/mipsmtregs.h>
42 #include <asm/processor.h>
43 #include <asm/syscall.h>
44 #include <linux/uaccess.h>
45 #include <asm/bootinfo.h>
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/syscalls.h>
52 * Called by kernel/ptrace.c when detaching.
54 * Make sure single-step bits etc. are not set.
56 void ptrace_disable(struct task_struct *child)
58 /* Don't load the watchpoint registers for the ex-child. */
59 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
63 * Read a general register set. We always use the 64-bit format, even
64 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
65 * Registers are sign extended to fill the available space.
67 int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
72 if (!access_ok(data, 38 * 8))
75 regs = task_pt_regs(child);
77 for (i = 0; i < 32; i++)
78 __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
79 __put_user((long)regs->lo, (__s64 __user *)&data->lo);
80 __put_user((long)regs->hi, (__s64 __user *)&data->hi);
81 __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
82 __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
83 __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
84 __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
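/*
 * Illustrative only, not part of this file: a hedged userspace sketch of
 * consuming the fixed 38*8-byte PTRACE_GETREGS image built above (32 GPRs,
 * then lo, hi, cp0_epc, cp0_badvaddr, cp0_status, cp0_cause, each held in a
 * sign-extended 64-bit slot).  The struct layout below is an assumption made
 * for the example; real users should take struct user_pt_regs from the UAPI
 * headers.
 *
 *	#include <sys/ptrace.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct mips_gregs {
 *		uint64_t regs[32];
 *		uint64_t lo, hi, epc, badvaddr, status, cause;
 *	} g;
 *
 *	if (ptrace(PTRACE_GETREGS, pid, NULL, &g) == 0)
 *		printf("epc=%#llx sp=%#llx\n",
 *		       (unsigned long long)g.epc,
 *		       (unsigned long long)g.regs[29]);
 */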
90 * Write a general register set. As for PTRACE_GETREGS, we always use
91 * the 64-bit format. On a 32-bit kernel only the lower order half
92 * (according to endianness) will be used.
94 int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
99 if (!access_ok(data, 38 * 8))
102 regs = task_pt_regs(child);
104 for (i = 0; i < 32; i++)
105 __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
106 __get_user(regs->lo, (__s64 __user *)&data->lo);
107 __get_user(regs->hi, (__s64 __user *)&data->hi);
108 __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
110 /* badvaddr, status, and cause may not be written. */
112 /* System call number may have been changed */
113 mips_syscall_update_nr(child, regs);
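/*
 * Note for the 32-bit kernel case mentioned above (descriptive, not code from
 * this file): each __get_user() reads a full 64-bit slot and the assignment
 * then truncates it to the 32-bit regs->regs[] field, so only the low-order
 * half of the value survives.  In the user buffer that half sits at byte
 * offset 4 of the slot on big-endian and offset 0 on little-endian, which is
 * what "according to endianness" refers to; a debugger that always fills
 * whole 64-bit slots therefore works unchanged on both kernel sizes.
 */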
118 int ptrace_get_watch_regs(struct task_struct *child,
119 struct pt_watch_regs __user *addr)
121 enum pt_watch_style style;
124 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
126 if (!access_ok(addr, sizeof(struct pt_watch_regs)))
130 style = pt_watch_style_mips32;
131 #define WATCH_STYLE mips32
133 style = pt_watch_style_mips64;
134 #define WATCH_STYLE mips64
137 __put_user(style, &addr->style);
138 __put_user(boot_cpu_data.watch_reg_use_cnt,
139 &addr->WATCH_STYLE.num_valid);
140 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
141 __put_user(child->thread.watch.mips3264.watchlo[i],
142 &addr->WATCH_STYLE.watchlo[i]);
143 __put_user(child->thread.watch.mips3264.watchhi[i] &
144 (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
145 &addr->WATCH_STYLE.watchhi[i]);
146 __put_user(boot_cpu_data.watch_reg_masks[i],
147 &addr->WATCH_STYLE.watch_masks[i]);
150 __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
151 __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
152 __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
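/*
 * Illustrative only: a hedged userspace sketch of reading the watch registers
 * exported above.  PTRACE_GET_WATCH_REGS, pt_watch_style_* and struct
 * pt_watch_regs come from the MIPS ptrace UAPI; the exact header needed may
 * vary between libcs.
 *
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace.h>
 *	#include <stdio.h>
 *
 *	struct pt_watch_regs wr;
 *
 *	if (ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL) == 0 &&
 *	    wr.style == pt_watch_style_mips32)
 *		printf("%u usable watch register(s)\n",
 *		       wr.mips32.num_valid);
 */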
158 int ptrace_set_watch_regs(struct task_struct *child,
159 struct pt_watch_regs __user *addr)
162 int watch_active = 0;
163 unsigned long lt[NUM_WATCH_REGS];
164 u16 ht[NUM_WATCH_REGS];
166 if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
168 if (!access_ok(addr, sizeof(struct pt_watch_regs)))
170 /* Check the values. */
171 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
172 __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
174 if (lt[i] & __UA_LIMIT)
177 if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
178 if (lt[i] & 0xffffffff80000000UL)
181 if (lt[i] & __UA_LIMIT)
185 __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
186 if (ht[i] & ~MIPS_WATCHHI_MASK)
190 for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
191 if (lt[i] & MIPS_WATCHLO_IRW)
193 child->thread.watch.mips3264.watchlo[i] = lt[i];
195 child->thread.watch.mips3264.watchhi[i] = ht[i];
199 set_tsk_thread_flag(child, TIF_LOAD_WATCH);
201 clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
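/*
 * Illustrative only, continuing the sketch above: after reading the current
 * state with PTRACE_GET_WATCH_REGS, a debugger arms a data-write watchpoint
 * by putting the (doubleword-aligned) address plus the W enable bit into
 * watchlo and writing the set back.  Bit positions follow the architected
 * WatchLo layout (W = bit 0, R = bit 1, I = bit 2); the mips32 field names
 * are assumptions of this example taken from the UAPI struct.
 *
 *	wr.mips32.watchlo[0] = ((unsigned int)addr & ~7u) | 1;
 *	wr.mips32.watchhi[0] = 0;
 *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 *
 * The "| 1" sets the W bit (trap on stores) and a watchhi of 0 requests no
 * extra address masking.
 */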
206 /* regset get/set implementations */
208 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
210 static int gpr32_get(struct task_struct *target,
211 const struct user_regset *regset,
214 struct pt_regs *regs = task_pt_regs(target);
215 u32 uregs[ELF_NGREG] = {};
217 mips_dump_regs32(uregs, regs);
218 return membuf_write(&to, uregs, sizeof(uregs));
221 static int gpr32_set(struct task_struct *target,
222 const struct user_regset *regset,
223 unsigned int pos, unsigned int count,
224 const void *kbuf, const void __user *ubuf)
226 struct pt_regs *regs = task_pt_regs(target);
227 u32 uregs[ELF_NGREG];
228 unsigned start, num_regs, i;
231 start = pos / sizeof(u32);
232 num_regs = count / sizeof(u32);
234 if (start + num_regs > ELF_NGREG)
237 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
242 for (i = start; i < num_regs; i++) {
244 * Cast all values to signed here so that if this is a 64-bit
245 * kernel, the supplied 32-bit values will be sign extended.
248 case MIPS32_EF_R1 ... MIPS32_EF_R25:
249 /* k0/k1 are ignored. */
250 case MIPS32_EF_R28 ... MIPS32_EF_R31:
251 regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
254 regs->lo = (s32)uregs[i];
257 regs->hi = (s32)uregs[i];
259 case MIPS32_EF_CP0_EPC:
260 regs->cp0_epc = (s32)uregs[i];
265 /* System call number may have been changed */
266 mips_syscall_update_nr(target, regs);
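/*
 * Worked example for the casts above (descriptive, not code from this file):
 * on a 64-bit kernel a 32-bit value such as 0x80000000 supplied by an o32
 * debugger becomes (s32)0x80000000 == 0xffffffff80000000 in the 64-bit
 * regs->regs[] slot, i.e. the canonical sign-extended form MIPS64 expects for
 * 32-bit addresses and program counters.
 */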
271 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
275 static int gpr64_get(struct task_struct *target,
276 const struct user_regset *regset,
279 struct pt_regs *regs = task_pt_regs(target);
280 u64 uregs[ELF_NGREG] = {};
282 mips_dump_regs64(uregs, regs);
283 return membuf_write(&to, uregs, sizeof(uregs));
286 static int gpr64_set(struct task_struct *target,
287 const struct user_regset *regset,
288 unsigned int pos, unsigned int count,
289 const void *kbuf, const void __user *ubuf)
291 struct pt_regs *regs = task_pt_regs(target);
292 u64 uregs[ELF_NGREG];
293 unsigned start, num_regs, i;
296 start = pos / sizeof(u64);
297 num_regs = count / sizeof(u64);
299 if (start + num_regs > ELF_NGREG)
302 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
307 for (i = start; i < num_regs; i++) {
309 case MIPS64_EF_R1 ... MIPS64_EF_R25:
310 /* k0/k1 are ignored. */
311 case MIPS64_EF_R28 ... MIPS64_EF_R31:
312 regs->regs[i - MIPS64_EF_R0] = uregs[i];
320 case MIPS64_EF_CP0_EPC:
321 regs->cp0_epc = uregs[i];
326 /* System call number may have been changed */
327 mips_syscall_update_nr(target, regs);
332 #endif /* CONFIG_64BIT */
335 #ifdef CONFIG_MIPS_FP_SUPPORT
338 * Poke at FCSR according to its mask. Set the Cause bits even
339 * if a corresponding Enable bit is set. This will be noticed at
340 * the time the thread is switched to and SIGFPE thrown accordingly.
342 static void ptrace_setfcr31(struct task_struct *child, u32 value)
347 fcr31 = child->thread.fpu.fcr31;
348 mask = boot_cpu_data.fpu_msk31;
349 child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
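/*
 * Note on the update above (descriptive): boot_cpu_data.fpu_msk31 is the mask
 * of FCSR bits this FPU implementation treats as fixed, so those bits are
 * preserved from the task's current fcr31 while every other bit is taken from
 * the ptrace-supplied value; a debugger therefore cannot set bits the
 * hardware would refuse to hold.
 */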
352 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
356 if (!access_ok(data, 33 * 8))
359 if (tsk_used_math(child)) {
360 union fpureg *fregs = get_fpu_regs(child);
361 for (i = 0; i < 32; i++)
362 __put_user(get_fpr64(&fregs[i], 0),
363 i + (__u64 __user *)data);
365 for (i = 0; i < 32; i++)
366 __put_user((__u64) -1, i + (__u64 __user *) data);
369 __put_user(child->thread.fpu.fcr31, data + 64);
370 __put_user(boot_cpu_data.fpu_id, data + 65);
375 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
382 if (!access_ok(data, 33 * 8))
386 fregs = get_fpu_regs(child);
388 for (i = 0; i < 32; i++) {
389 __get_user(fpr_val, i + (__u64 __user *)data);
390 set_fpr64(&fregs[i], 0, fpr_val);
393 __get_user(value, data + 64);
394 ptrace_setfcr31(child, value);
396 /* FIR may not be written. */
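/*
 * Illustrative only: the PTRACE_GETFPREGS/PTRACE_SETFPREGS image used above
 * is 33 doublewords -- fpr[0..31] as 64-bit values, then fcr31 and the
 * read-only FIR sharing the last doubleword (word offsets 64 and 65 of the
 * __u32 view).  A hedged userspace sketch:
 *
 *	#include <sys/ptrace.h>
 *	#include <stdint.h>
 *
 *	uint64_t fpbuf[33];
 *
 *	if (ptrace(PTRACE_GETFPREGS, pid, NULL, fpbuf) == 0) {
 *		uint32_t fcr31 = ((uint32_t *)fpbuf)[64];
 *		uint32_t fir   = ((uint32_t *)fpbuf)[65];
 *	}
 */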
402 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
403 * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
404 * correspond 1:1 to buffer slots. Only general registers are copied.
406 static void fpr_get_fpa(struct task_struct *target,
409 membuf_write(to, &target->thread.fpu,
410 NUM_FPU_REGS * sizeof(elf_fpreg_t));
414 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
415 * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
416 * general register slots are copied to buffer slots. Only general
417 * registers are copied.
419 static void fpr_get_msa(struct task_struct *target, struct membuf *to)
423 BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
424 for (i = 0; i < NUM_FPU_REGS; i++)
425 membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
429 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
430 * Choose the appropriate helper for general registers, and then copy
431 * the FCSR and FIR registers separately.
433 static int fpr_get(struct task_struct *target,
434 const struct user_regset *regset,
437 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
438 fpr_get_fpa(target, &to);
440 fpr_get_msa(target, &to);
442 membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
443 membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
448 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
449 * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
450 * context's general register slots. Only general registers are copied.
452 static int fpr_set_fpa(struct task_struct *target,
453 unsigned int *pos, unsigned int *count,
454 const void **kbuf, const void __user **ubuf)
456 return user_regset_copyin(pos, count, kbuf, ubuf,
458 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
462 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
463 * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
464 * bits only of FP context's general register slots. Only general
465 * registers are copied.
467 static int fpr_set_msa(struct task_struct *target,
468 unsigned int *pos, unsigned int *count,
469 const void **kbuf, const void __user **ubuf)
475 BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
476 for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
477 err = user_regset_copyin(pos, count, kbuf, ubuf,
478 &fpr_val, i * sizeof(elf_fpreg_t),
479 (i + 1) * sizeof(elf_fpreg_t));
482 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
489 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
490 * Choose the appropriate helper for general registers, and then copy
491 * the FCSR register separately. Ignore the incoming FIR register
492 * contents though, as the register is read-only.
494 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
495 * which is supposed to have been guaranteed by the kernel before
496 * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
497 * so that we can safely avoid preinitializing temporaries for
498 * partial register writes.
500 static int fpr_set(struct task_struct *target,
501 const struct user_regset *regset,
502 unsigned int pos, unsigned int count,
503 const void *kbuf, const void __user *ubuf)
505 const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
506 const int fir_pos = fcr31_pos + sizeof(u32);
510 BUG_ON(count % sizeof(elf_fpreg_t));
512 if (pos + count > sizeof(elf_fpregset_t))
517 if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
518 err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
520 err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
525 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
527 fcr31_pos, fcr31_pos + sizeof(u32));
531 ptrace_setfcr31(target, fcr31);
535 err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
537 fir_pos + sizeof(u32));
542 /* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
543 static int fp_mode_get(struct task_struct *target,
544 const struct user_regset *regset,
547 return membuf_store(&to, (int)mips_get_process_fp_mode(target));
551 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
553 * We optimize for the case where `count % sizeof(int) == 0', which
554 * is supposed to have been guaranteed by the kernel before calling
555 * us, e.g. in `ptrace_regset'. We enforce that requirement, so
556 * that we can safely avoid preinitializing temporaries for partial mode writes.
559 static int fp_mode_set(struct task_struct *target,
560 const struct user_regset *regset,
561 unsigned int pos, unsigned int count,
562 const void *kbuf, const void __user *ubuf)
567 BUG_ON(count % sizeof(int));
569 if (pos + count > sizeof(fp_mode))
572 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
578 err = mips_set_process_fp_mode(target, fp_mode);
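/*
 * Illustrative only: the FP mode regset is reached through the generic regset
 * interface rather than a dedicated request.  A hedged userspace sketch,
 * assuming NT_MIPS_FP_MODE is visible via <linux/elf.h>:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <linux/elf.h>
 *
 *	int mode;
 *	struct iovec iov = { .iov_base = &mode, .iov_len = sizeof(mode) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_MIPS_FP_MODE, &iov) == 0)
 *		handle_fp_mode(mode);
 *
 * handle_fp_mode() is a placeholder for the example; the value carries the
 * same PR_FP_MODE_FR/PR_FP_MODE_FRE bits reported by prctl(PR_GET_FP_MODE).
 */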
583 #endif /* CONFIG_MIPS_FP_SUPPORT */
585 #ifdef CONFIG_CPU_HAS_MSA
587 struct msa_control_regs {
594 static void copy_pad_fprs(struct task_struct *target,
595 const struct user_regset *regset,
597 unsigned int live_sz)
600 unsigned long long fill = ~0ull;
601 unsigned int cp_sz, pad_sz;
603 cp_sz = min(regset->size, live_sz);
604 pad_sz = regset->size - cp_sz;
605 WARN_ON(pad_sz % sizeof(fill));
607 for (i = 0; i < NUM_FPU_REGS; i++) {
608 membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
609 for (j = 0; j < (pad_sz / sizeof(fill)); j++)
610 membuf_store(to, fill);
614 static int msa_get(struct task_struct *target,
615 const struct user_regset *regset,
618 const unsigned int wr_size = NUM_FPU_REGS * regset->size;
619 const struct msa_control_regs ctrl_regs = {
620 .fir = boot_cpu_data.fpu_id,
621 .fcsr = target->thread.fpu.fcr31,
622 .msair = boot_cpu_data.msa_id,
623 .msacsr = target->thread.fpu.msacsr,
626 if (!tsk_used_math(target)) {
627 /* The task hasn't used FP or MSA, fill with 0xff */
628 copy_pad_fprs(target, regset, &to, 0);
629 } else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
630 /* Copy scalar FP context, fill the rest with 0xff */
631 copy_pad_fprs(target, regset, &to, 8);
632 } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
633 /* Trivially copy the vector registers */
634 membuf_write(&to, &target->thread.fpu.fpr, wr_size);
636 /* Copy as much context as possible, fill the rest with 0xff */
637 copy_pad_fprs(target, regset, &to,
638 sizeof(target->thread.fpu.fpr[0]));
641 return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
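/*
 * Layout note (descriptive): the NT_MIPS_MSA regset is NUM_FPU_REGS
 * fixed-size vector slots followed by the msa_control_regs block (fir, fcsr,
 * msair, msacsr).  Slots, or the tails of slots, for which the task has no
 * live context are filled with 0xff bytes by copy_pad_fprs() rather than
 * being left as zeroes.
 */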
644 static int msa_set(struct task_struct *target,
645 const struct user_regset *regset,
646 unsigned int pos, unsigned int count,
647 const void *kbuf, const void __user *ubuf)
649 const unsigned int wr_size = NUM_FPU_REGS * regset->size;
650 struct msa_control_regs ctrl_regs;
656 if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
657 /* Trivially copy the vector registers */
658 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
659 &target->thread.fpu.fpr,
662 /* Copy as much context as possible */
663 cp_sz = min_t(unsigned int, regset->size,
664 sizeof(target->thread.fpu.fpr[0]));
667 for (; i < NUM_FPU_REGS; i++, start += regset->size) {
668 err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
669 &target->thread.fpu.fpr[i],
670 start, start + cp_sz);
675 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
676 wr_size, wr_size + sizeof(ctrl_regs));
678 target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
679 target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
685 #endif /* CONFIG_CPU_HAS_MSA */
687 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
690 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
692 static int dsp32_get(struct task_struct *target,
693 const struct user_regset *regset,
696 u32 dspregs[NUM_DSP_REGS + 1];
699 BUG_ON(to.left % sizeof(u32));
704 for (i = 0; i < NUM_DSP_REGS; i++)
705 dspregs[i] = target->thread.dsp.dspr[i];
706 dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
707 return membuf_write(&to, dspregs, sizeof(dspregs));
711 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
713 static int dsp32_set(struct task_struct *target,
714 const struct user_regset *regset,
715 unsigned int pos, unsigned int count,
716 const void *kbuf, const void __user *ubuf)
718 unsigned int start, num_regs, i;
719 u32 dspregs[NUM_DSP_REGS + 1];
722 BUG_ON(count % sizeof(u32));
727 start = pos / sizeof(u32);
728 num_regs = count / sizeof(u32);
730 if (start + num_regs > NUM_DSP_REGS + 1)
733 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
738 for (i = start; i < num_regs; i++)
740 case 0 ... NUM_DSP_REGS - 1:
741 target->thread.dsp.dspr[i] = (s32)dspregs[i];
744 target->thread.dsp.dspcontrol = (s32)dspregs[i];
751 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
756 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
758 static int dsp64_get(struct task_struct *target,
759 const struct user_regset *regset,
762 u64 dspregs[NUM_DSP_REGS + 1];
765 BUG_ON(to.left % sizeof(u64));
770 for (i = 0; i < NUM_DSP_REGS; i++)
771 dspregs[i] = target->thread.dsp.dspr[i];
772 dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
773 return membuf_write(&to, dspregs, sizeof(dspregs));
777 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
779 static int dsp64_set(struct task_struct *target,
780 const struct user_regset *regset,
781 unsigned int pos, unsigned int count,
782 const void *kbuf, const void __user *ubuf)
784 unsigned int start, num_regs, i;
785 u64 dspregs[NUM_DSP_REGS + 1];
788 BUG_ON(count % sizeof(u64));
793 start = pos / sizeof(u64);
794 num_regs = count / sizeof(u64);
796 if (start + num_regs > NUM_DSP_REGS + 1)
799 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
804 for (i = start; i < num_regs; i++)
806 case 0 ... NUM_DSP_REGS - 1:
807 target->thread.dsp.dspr[i] = dspregs[i];
810 target->thread.dsp.dspcontrol = dspregs[i];
817 #endif /* CONFIG_64BIT */
820 * Determine whether the DSP context is present.
822 static int dsp_active(struct task_struct *target,
823 const struct user_regset *regset)
825 return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
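/*
 * Illustrative only: dsp_active() is the regset ->active hook, so the
 * NT_MIPS_DSP note simply never appears for CPUs without the DSP ASE.  A
 * hedged userspace sketch of reading the six accumulator halves plus
 * DSPControl (the element size follows the tracee's ABI, u32 for the 32-bit
 * view and u64 for the 64-bit one; "7" stands in for the kernel-side
 * NUM_DSP_REGS + 1):
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <linux/elf.h>
 *
 *	unsigned long dsp[7];
 *	struct iovec iov = { .iov_base = dsp, .iov_len = sizeof(dsp) };
 *
 *	ptrace(PTRACE_GETREGSET, pid, NT_MIPS_DSP, &iov);
 */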
831 #ifdef CONFIG_MIPS_FP_SUPPORT
835 #ifdef CONFIG_CPU_HAS_MSA
840 struct pt_regs_offset {
845 #define REG_OFFSET_NAME(reg, r) { \
847 .offset = offsetof(struct pt_regs, r) \
850 #define REG_OFFSET_END { \
855 static const struct pt_regs_offset regoffset_table[] = {
856 REG_OFFSET_NAME(r0, regs[0]),
857 REG_OFFSET_NAME(r1, regs[1]),
858 REG_OFFSET_NAME(r2, regs[2]),
859 REG_OFFSET_NAME(r3, regs[3]),
860 REG_OFFSET_NAME(r4, regs[4]),
861 REG_OFFSET_NAME(r5, regs[5]),
862 REG_OFFSET_NAME(r6, regs[6]),
863 REG_OFFSET_NAME(r7, regs[7]),
864 REG_OFFSET_NAME(r8, regs[8]),
865 REG_OFFSET_NAME(r9, regs[9]),
866 REG_OFFSET_NAME(r10, regs[10]),
867 REG_OFFSET_NAME(r11, regs[11]),
868 REG_OFFSET_NAME(r12, regs[12]),
869 REG_OFFSET_NAME(r13, regs[13]),
870 REG_OFFSET_NAME(r14, regs[14]),
871 REG_OFFSET_NAME(r15, regs[15]),
872 REG_OFFSET_NAME(r16, regs[16]),
873 REG_OFFSET_NAME(r17, regs[17]),
874 REG_OFFSET_NAME(r18, regs[18]),
875 REG_OFFSET_NAME(r19, regs[19]),
876 REG_OFFSET_NAME(r20, regs[20]),
877 REG_OFFSET_NAME(r21, regs[21]),
878 REG_OFFSET_NAME(r22, regs[22]),
879 REG_OFFSET_NAME(r23, regs[23]),
880 REG_OFFSET_NAME(r24, regs[24]),
881 REG_OFFSET_NAME(r25, regs[25]),
882 REG_OFFSET_NAME(r26, regs[26]),
883 REG_OFFSET_NAME(r27, regs[27]),
884 REG_OFFSET_NAME(r28, regs[28]),
885 REG_OFFSET_NAME(r29, regs[29]),
886 REG_OFFSET_NAME(r30, regs[30]),
887 REG_OFFSET_NAME(r31, regs[31]),
888 REG_OFFSET_NAME(c0_status, cp0_status),
889 REG_OFFSET_NAME(hi, hi),
890 REG_OFFSET_NAME(lo, lo),
891 #ifdef CONFIG_CPU_HAS_SMARTMIPS
892 REG_OFFSET_NAME(acx, acx),
894 REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
895 REG_OFFSET_NAME(c0_cause, cp0_cause),
896 REG_OFFSET_NAME(c0_epc, cp0_epc),
897 #ifdef CONFIG_CPU_CAVIUM_OCTEON
898 REG_OFFSET_NAME(mpl0, mpl[0]),
899 REG_OFFSET_NAME(mpl1, mpl[1]),
900 REG_OFFSET_NAME(mpl2, mpl[2]),
901 REG_OFFSET_NAME(mtp0, mtp[0]),
902 REG_OFFSET_NAME(mtp1, mtp[1]),
903 REG_OFFSET_NAME(mtp2, mtp[2]),
909 * regs_query_register_offset() - query register offset from its name
910 * @name: the name of a register
912 * regs_query_register_offset() returns the offset of a register in struct
913 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
915 int regs_query_register_offset(const char *name)
917 const struct pt_regs_offset *roff;
918 for (roff = regoffset_table; roff->name != NULL; roff++)
919 if (!strcmp(roff->name, name))
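/*
 * Usage example (hedged): this name lookup is what lets kprobe-style fetch
 * arguments refer to registers symbolically, e.g.
 *
 *	int off = regs_query_register_offset("c0_epc");
 *
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 *
 * regs_get_register() is the generic byte-offset accessor declared in
 * <asm/ptrace.h>; "val" is just a local introduced for the example.
 */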
924 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
926 static const struct user_regset mips_regsets[] = {
928 .core_note_type = NT_PRSTATUS,
930 .size = sizeof(unsigned int),
931 .align = sizeof(unsigned int),
932 .regset_get = gpr32_get,
936 .core_note_type = NT_MIPS_DSP,
937 .n = NUM_DSP_REGS + 1,
939 .align = sizeof(u32),
940 .regset_get = dsp32_get,
942 .active = dsp_active,
944 #ifdef CONFIG_MIPS_FP_SUPPORT
946 .core_note_type = NT_PRFPREG,
948 .size = sizeof(elf_fpreg_t),
949 .align = sizeof(elf_fpreg_t),
950 .regset_get = fpr_get,
954 .core_note_type = NT_MIPS_FP_MODE,
957 .align = sizeof(int),
958 .regset_get = fp_mode_get,
962 #ifdef CONFIG_CPU_HAS_MSA
964 .core_note_type = NT_MIPS_MSA,
965 .n = NUM_FPU_REGS + 1,
968 .regset_get = msa_get,
974 static const struct user_regset_view user_mips_view = {
976 .e_machine = ELF_ARCH,
977 .ei_osabi = ELF_OSABI,
978 .regsets = mips_regsets,
979 .n = ARRAY_SIZE(mips_regsets),
982 #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
986 static const struct user_regset mips64_regsets[] = {
988 .core_note_type = NT_PRSTATUS,
990 .size = sizeof(unsigned long),
991 .align = sizeof(unsigned long),
992 .regset_get = gpr64_get,
996 .core_note_type = NT_MIPS_DSP,
997 .n = NUM_DSP_REGS + 1,
999 .align = sizeof(u64),
1000 .regset_get = dsp64_get,
1002 .active = dsp_active,
1004 #ifdef CONFIG_MIPS_FP_SUPPORT
1005 [REGSET_FP_MODE] = {
1006 .core_note_type = NT_MIPS_FP_MODE,
1008 .size = sizeof(int),
1009 .align = sizeof(int),
1010 .regset_get = fp_mode_get,
1014 .core_note_type = NT_PRFPREG,
1016 .size = sizeof(elf_fpreg_t),
1017 .align = sizeof(elf_fpreg_t),
1018 .regset_get = fpr_get,
1022 #ifdef CONFIG_CPU_HAS_MSA
1024 .core_note_type = NT_MIPS_MSA,
1025 .n = NUM_FPU_REGS + 1,
1028 .regset_get = msa_get,
1034 static const struct user_regset_view user_mips64_view = {
1036 .e_machine = ELF_ARCH,
1037 .ei_osabi = ELF_OSABI,
1038 .regsets = mips64_regsets,
1039 .n = ARRAY_SIZE(mips64_regsets),
1042 #ifdef CONFIG_MIPS32_N32
1044 static const struct user_regset_view user_mipsn32_view = {
1046 .e_flags = EF_MIPS_ABI2,
1047 .e_machine = ELF_ARCH,
1048 .ei_osabi = ELF_OSABI,
1049 .regsets = mips64_regsets,
1050 .n = ARRAY_SIZE(mips64_regsets),
1053 #endif /* CONFIG_MIPS32_N32 */
1055 #endif /* CONFIG_64BIT */
1057 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1060 return &user_mips_view;
1062 #ifdef CONFIG_MIPS32_O32
1063 if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
1064 return &user_mips_view;
1066 #ifdef CONFIG_MIPS32_N32
1067 if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
1068 return &user_mipsn32_view;
1070 return &user_mips64_view;
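/*
 * Selection summary (descriptive): a 32-bit kernel always returns
 * user_mips_view; on a 64-bit kernel, o32 tasks (TIF_32BIT_REGS) get the
 * 32-bit view, n32 tasks (TIF_32BIT_ADDR without TIF_32BIT_REGS) get
 * user_mipsn32_view, which reuses the 64-bit regsets but marks core dumps
 * with EF_MIPS_ABI2, and everything else gets the native n64
 * user_mips64_view.
 */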
1074 long arch_ptrace(struct task_struct *child, long request,
1075 unsigned long addr, unsigned long data)
1078 void __user *addrp = (void __user *) addr;
1079 void __user *datavp = (void __user *) data;
1080 unsigned long __user *datalp = (void __user *) data;
1083 /* when I and D space are separate, these will need to be fixed. */
1084 case PTRACE_PEEKTEXT: /* read word at location addr. */
1085 case PTRACE_PEEKDATA:
1086 ret = generic_ptrace_peekdata(child, addr, data);
1089 /* Read the word at location addr in the USER area. */
1090 case PTRACE_PEEKUSR: {
1091 struct pt_regs *regs;
1092 unsigned long tmp = 0;
1094 regs = task_pt_regs(child);
1095 ret = 0; /* Default return value. */
1099 tmp = regs->regs[addr];
1101 #ifdef CONFIG_MIPS_FP_SUPPORT
1102 case FPR_BASE ... FPR_BASE + 31: {
1103 union fpureg *fregs;
1105 if (!tsk_used_math(child)) {
1106 /* FP not yet used */
1110 fregs = get_fpu_regs(child);
1113 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1115 * The odd registers are actually the high
1116 * order bits of the values stored in the even registers.
1119 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1124 tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
1128 tmp = child->thread.fpu.fcr31;
1131 /* implementation / version register */
1132 tmp = boot_cpu_data.fpu_id;
1136 tmp = regs->cp0_epc;
1139 tmp = regs->cp0_cause;
1142 tmp = regs->cp0_badvaddr;
1150 #ifdef CONFIG_CPU_HAS_SMARTMIPS
1155 case DSP_BASE ... DSP_BASE + 5: {
1163 dregs = __get_dsp_regs(child);
1164 tmp = dregs[addr - DSP_BASE];
1173 tmp = child->thread.dsp.dspcontrol;
1180 ret = put_user(tmp, datalp);
1184 /* when I and D space are separate, this will have to be fixed. */
1185 case PTRACE_POKETEXT: /* write the word at location addr. */
1186 case PTRACE_POKEDATA:
1187 ret = generic_ptrace_pokedata(child, addr, data);
1190 case PTRACE_POKEUSR: {
1191 struct pt_regs *regs;
1193 regs = task_pt_regs(child);
1197 regs->regs[addr] = data;
1198 /* System call number may have been changed */
1200 mips_syscall_update_nr(child, regs);
1201 else if (addr == 4 &&
1202 mips_syscall_is_indirect(child, regs))
1203 mips_syscall_update_nr(child, regs);
1205 #ifdef CONFIG_MIPS_FP_SUPPORT
1206 case FPR_BASE ... FPR_BASE + 31: {
1207 union fpureg *fregs = get_fpu_regs(child);
1211 if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1213 * The odd registers are actually the high
1214 * order bits of the values stored in the even registers.
1217 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1222 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
1227 ptrace_setfcr31(child, data);
1231 regs->cp0_epc = data;
1239 #ifdef CONFIG_CPU_HAS_SMARTMIPS
1244 case DSP_BASE ... DSP_BASE + 5: {
1252 dregs = __get_dsp_regs(child);
1253 dregs[addr - DSP_BASE] = data;
1261 child->thread.dsp.dspcontrol = data;
1264 /* The rest are not allowed. */
1271 case PTRACE_GETREGS:
1272 ret = ptrace_getregs(child, datavp);
1275 case PTRACE_SETREGS:
1276 ret = ptrace_setregs(child, datavp);
1279 #ifdef CONFIG_MIPS_FP_SUPPORT
1280 case PTRACE_GETFPREGS:
1281 ret = ptrace_getfpregs(child, datavp);
1284 case PTRACE_SETFPREGS:
1285 ret = ptrace_setfpregs(child, datavp);
1288 case PTRACE_GET_THREAD_AREA:
1289 ret = put_user(task_thread_info(child)->tp_value, datalp);
1292 case PTRACE_GET_WATCH_REGS:
1293 ret = ptrace_get_watch_regs(child, addrp);
1296 case PTRACE_SET_WATCH_REGS:
1297 ret = ptrace_set_watch_regs(child, addrp);
1301 ret = ptrace_request(child, request, addr, data);
1309 * Notification of system call entry/exit
1310 * - triggered by current->work.syscall_trace
1312 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1316 current_thread_info()->syscall = syscall;
1318 if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1319 if (ptrace_report_syscall_entry(regs))
1321 syscall = current_thread_info()->syscall;
1324 #ifdef CONFIG_SECCOMP
1325 if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1327 struct seccomp_data sd;
1328 unsigned long args[6];
1331 sd.arch = syscall_get_arch(current);
1332 syscall_get_arguments(current, regs, args);
1333 for (i = 0; i < 6; i++)
1334 sd.args[i] = args[i];
1335 sd.instruction_pointer = KSTK_EIP(current);
1337 ret = __secure_computing(&sd);
1340 syscall = current_thread_info()->syscall;
1344 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1345 trace_sys_enter(regs, regs->regs[2]);
1347 audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1348 regs->regs[6], regs->regs[7]);
1351 * Negative syscall numbers are mistaken for rejected syscalls, but
1352 * won't have had the return value set appropriately, so we do so now.
1355 syscall_set_return_value(current, regs, -ENOSYS, 0);
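/*
 * Ordering note (descriptive): the ptrace TIF_SYSCALL_TRACE hook runs first
 * and may rewrite the syscall number, seccomp then filters the
 * possibly-updated number, tracepoints and audit see the final value, and a
 * negative number at this point has its return value forced to -ENOSYS so a
 * traced "skip this syscall" request looks like an ordinary rejected call.
 */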
1360 * Notification of system call entry/exit
1361 * - triggered by current->work.syscall_trace
1363 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1366 * We may come here right after calling schedule_user()
1367 * or do_notify_resume(), in which case we can be in RCU user mode.
1372 audit_syscall_exit(regs);
1374 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1375 trace_sys_exit(regs, regs_return_value(regs));
1377 if (test_thread_flag(TIF_SYSCALL_TRACE))
1378 ptrace_report_syscall_exit(regs, 0);