/*
 * SuperH process tracing
 *
 * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
16 #include <linux/smp.h>
17 #include <linux/errno.h>
18 #include <linux/ptrace.h>
19 #include <linux/user.h>
20 #include <linux/security.h>
21 #include <linux/signal.h>
23 #include <linux/audit.h>
24 #include <linux/seccomp.h>
25 #include <linux/tracehook.h>
26 #include <linux/elf.h>
27 #include <linux/regset.h>
28 #include <linux/hw_breakpoint.h>
29 #include <asm/uaccess.h>
30 #include <asm/pgtable.h>
31 #include <asm/system.h>
32 #include <asm/processor.h>
33 #include <asm/mmu_context.h>
34 #include <asm/syscalls.h>
37 #define CREATE_TRACE_POINTS
38 #include <trace/events/syscalls.h>
/*
 * get_stack_long - read one word from the traced child's saved user
 * context (the struct pt_regs at task_pt_regs()).
 *
 * @task:   traced child task
 * @offset: byte offset into the saved pt_regs
 *
 * NOTE(review): interior lines are elided in this chunk (embedded line
 * numbers jump 43->47->49); presumably @offset is added to 'stack'
 * before the dereference — confirm against the full file.
 */
41 * This routine will get a word off of the process kernel stack.
43 static inline int get_stack_long(struct task_struct *task, int offset)
47 stack = (unsigned char *)task_pt_regs(task);
49 return (*((int *)stack));
/*
 * put_stack_long - counterpart of get_stack_long(): store one word into
 * the traced child's saved user context (struct pt_regs).
 *
 * NOTE(review): the 'data' parameter line and the offset addition are
 * elided in this chunk — presumably stack += offset precedes the store;
 * confirm against the full file.
 */
53 * This routine will put a word on the process kernel stack.
55 static inline int put_stack_long(struct task_struct *task, int offset,
60 stack = (unsigned char *)task_pt_regs(task);
62 *(unsigned long *) stack = data;
/*
 * ptrace_triggered - perf callback invoked when the ptrace hardware
 * breakpoint fires.
 *
 * Re-arms nothing: ptrace breakpoints are one-shot, so the handler
 * pushes a modified (disabled) copy of the event's attributes back via
 * modify_user_hw_breakpoint().
 *
 * NOTE(review): the lines that copy bp's attr into 'attr' and set
 * attr.disabled are elided in this chunk — confirm against full file.
 */
66 void ptrace_triggered(struct perf_event *bp,
67 struct perf_sample_data *data, struct pt_regs *regs)
69 struct perf_event_attr attr;
72 * Disable the breakpoint request here since ptrace has defined a
73 * one-shot behaviour for breakpoint exceptions.
77 modify_user_hw_breakpoint(bp, &attr);
/*
 * set_single_step - plant (or re-enable) the single-step hardware
 * breakpoint for @tsk at user address @addr.
 *
 * First use: initialises a perf breakpoint attr (2-byte length, read
 * type) and registers a new user hw breakpoint in ptrace_bps[0] with
 * ptrace_triggered as its handler.  Subsequent uses: re-enables the
 * already-registered breakpoint by clearing attr.disabled.
 *
 * NOTE(review): the branch structure (if !bp / else), the bp_addr
 * assignment, error checks and return statements are elided in this
 * chunk — confirm against the full file.
 */
80 static int set_single_step(struct task_struct *tsk, unsigned long addr)
82 struct thread_struct *thread = &tsk->thread;
83 struct perf_event *bp;
84 struct perf_event_attr attr;
86 bp = thread->ptrace_bps[0];
88 ptrace_breakpoint_init(&attr);
91 attr.bp_len = HW_BREAKPOINT_LEN_2;
92 attr.bp_type = HW_BREAKPOINT_R;
94 bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
98 thread->ptrace_bps[0] = bp;
104 /* reenable breakpoint */
105 attr.disabled = false;
106 err = modify_user_hw_breakpoint(bp, &attr);
/*
 * user_enable_single_step - arm single-stepping for a ptraced child.
 *
 * Reads the child's saved PC from its pt_regs, sets TIF_SINGLESTEP,
 * and installs a hardware breakpoint at that PC via set_single_step().
 * ptrace_get_breakpoints()/ptrace_put_breakpoints() bracket the slot
 * use (refcounting; bail out if the get fails).
 */
114 void user_enable_single_step(struct task_struct *child)
116 unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
118 set_tsk_thread_flag(child, TIF_SINGLESTEP);
120 if (ptrace_get_breakpoints(child) < 0)
123 set_single_step(child, pc);
124 ptrace_put_breakpoints(child);
/*
 * user_disable_single_step - drop the single-step request by clearing
 * TIF_SINGLESTEP.  The hardware breakpoint itself is one-shot (see
 * ptrace_triggered), so no explicit unregistration is visible here.
 */
127 void user_disable_single_step(struct task_struct *child)
129 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
/*
 * ptrace_disable - detach-time cleanup: simply ensure single-stepping
 * is no longer requested for the child.
 */
133 * Called by kernel/ptrace.c when detaching..
135 * Make sure single step bits etc are not set.
137 void ptrace_disable(struct task_struct *child)
139 user_disable_single_step(child);
/*
 * genregs_get - regset .get handler for the general registers.
 *
 * Copies out, in order: the 16 general registers (16 * sizeof(long)
 * from the start of pt_regs), then the PC/PR/SR/GBR/MACH/MACL/TRA tail
 * starting at offsetof(struct pt_regs, pc), and finally zero-fills any
 * remainder of the user buffer beyond sizeof(struct pt_regs).
 *
 * NOTE(review): the 'int ret' declaration, the regs source arguments
 * to the copyout calls, intermediate "if (!ret)" guards and the final
 * return are elided in this chunk — confirm against the full file.
 */
142 static int genregs_get(struct task_struct *target,
143 const struct user_regset *regset,
144 unsigned int pos, unsigned int count,
145 void *kbuf, void __user *ubuf)
147 const struct pt_regs *regs = task_pt_regs(target);
150 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
152 0, 16 * sizeof(unsigned long));
154 /* PC, PR, SR, GBR, MACH, MACL, TRA */
155 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
157 offsetof(struct pt_regs, pc),
158 sizeof(struct pt_regs));
160 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
161 sizeof(struct pt_regs), -1);
/*
 * genregs_set - regset .set handler, mirror of genregs_get: copies the
 * 16 general registers in from the user/kernel buffer, then (only if
 * the first copy succeeded and data remains) the PC..TRA tail, and
 * finally ignores any input past sizeof(struct pt_regs).
 *
 * NOTE(review): 'int ret', destination arguments of the copyin calls
 * and the final return are elided in this chunk — confirm against the
 * full file.
 */
166 static int genregs_set(struct task_struct *target,
167 const struct user_regset *regset,
168 unsigned int pos, unsigned int count,
169 const void *kbuf, const void __user *ubuf)
171 struct pt_regs *regs = task_pt_regs(target);
174 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
176 0, 16 * sizeof(unsigned long));
177 if (!ret && count > 0)
178 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
180 offsetof(struct pt_regs, pc),
181 sizeof(struct pt_regs));
183 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
184 sizeof(struct pt_regs), -1);
/*
 * fpregs_get - regset .get handler for FPU state.
 *
 * Forces lazy FPU state to be materialised via init_fpu(), then copies
 * out either the hardware FPU image (CPU_HAS_FPU set in
 * boot_cpu_data.flags) or the soft-FPU image from thread.xstate.
 *
 * NOTE(review): the error check on init_fpu()'s return is elided in
 * this chunk — confirm against the full file.
 */
190 int fpregs_get(struct task_struct *target,
191 const struct user_regset *regset,
192 unsigned int pos, unsigned int count,
193 void *kbuf, void __user *ubuf)
197 ret = init_fpu(target);
201 if ((boot_cpu_data.flags & CPU_HAS_FPU))
202 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
203 &target->thread.xstate->hardfpu, 0, -1);
205 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
206 &target->thread.xstate->softfpu, 0, -1);
/*
 * fpregs_set - regset .set handler for FPU state; mirror of
 * fpregs_get.  Marks the stopped child as having used math before
 * copying the new FPU image in (hard or soft depending on
 * CPU_HAS_FPU).
 *
 * NOTE(review): the error check on init_fpu()'s return is elided in
 * this chunk — confirm against the full file.
 */
209 static int fpregs_set(struct task_struct *target,
210 const struct user_regset *regset,
211 unsigned int pos, unsigned int count,
212 const void *kbuf, const void __user *ubuf)
216 ret = init_fpu(target);
220 set_stopped_child_used_math(target);
222 if ((boot_cpu_data.flags & CPU_HAS_FPU))
223 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
224 &target->thread.xstate->hardfpu, 0, -1);
226 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
227 &target->thread.xstate->softfpu, 0, -1);
/*
 * fpregs_active - the FPU regset is only worth dumping if the task has
 * ever used math; report the full register count or zero accordingly.
 */
230 static int fpregs_active(struct task_struct *target,
231 const struct user_regset *regset)
233 return tsk_used_math(target) ? regset->n : 0;
/*
 * dspregs_get - regset .get handler for the DSP registers, copied from
 * thread.dsp_status.dsp_regs; the tail of the buffer past
 * sizeof(struct pt_dspregs) is zero-filled.
 *
 * NOTE(review): 'int ret' and the final return are elided in this
 * chunk — confirm against the full file.
 */
238 static int dspregs_get(struct task_struct *target,
239 const struct user_regset *regset,
240 unsigned int pos, unsigned int count,
241 void *kbuf, void __user *ubuf)
243 const struct pt_dspregs *regs =
244 (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
247 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
248 0, sizeof(struct pt_dspregs));
250 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
251 sizeof(struct pt_dspregs), -1);
/*
 * dspregs_set - regset .set handler for the DSP registers; mirror of
 * dspregs_get.  Input beyond sizeof(struct pt_dspregs) is ignored.
 *
 * NOTE(review): 'int ret' and the final return are elided in this
 * chunk — confirm against the full file.
 */
256 static int dspregs_set(struct task_struct *target,
257 const struct user_regset *regset,
258 unsigned int pos, unsigned int count,
259 const void *kbuf, const void __user *ubuf)
261 struct pt_dspregs *regs =
262 (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
265 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
266 0, sizeof(struct pt_dspregs));
268 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
269 sizeof(struct pt_dspregs), -1);
/*
 * dspregs_active - the DSP regset is only live when the task's saved
 * SR has the DSP bit set; report the register count or zero.
 */
274 static int dspregs_active(struct task_struct *target,
275 const struct user_regset *regset)
277 struct pt_regs *regs = task_pt_regs(target);
279 return regs->sr & SR_DSP ? regset->n : 0;
/*
 * regoffset_table - name -> pt_regs byte-offset map used by the
 * regs_query_register_* helpers.
 *
 * NOTE(review): entries for r0-r9, pc/pr/sr and the table terminator
 * are elided in this chunk — only r10-r15 and gbr/mach/macl/tra are
 * visible; confirm against the full file.
 */
283 const struct pt_regs_offset regoffset_table[] = {
294 REGS_OFFSET_NAME(10),
295 REGS_OFFSET_NAME(11),
296 REGS_OFFSET_NAME(12),
297 REGS_OFFSET_NAME(13),
298 REGS_OFFSET_NAME(14),
299 REGS_OFFSET_NAME(15),
303 REG_OFFSET_NAME(gbr),
304 REG_OFFSET_NAME(mach),
305 REG_OFFSET_NAME(macl),
306 REG_OFFSET_NAME(tra),
/*
 * sh_regsets - the native regset array: general registers (NT_PRSTATUS,
 * long-sized entries), FPU state (NT_PRFPREG, gated by fpregs_active),
 * and the DSP register block (gated by dspregs_active).
 *
 * NOTE(review): the per-regset .get/.set hookups, the REGSET_* index
 * comments and the general regset's .n field are elided in this chunk
 * — confirm against the full file.
 */
311 * These are our native regset flavours.
323 static const struct user_regset sh_regsets[] = {
327 * PC, PR, SR, GBR, MACH, MACL, TRA
330 .core_note_type = NT_PRSTATUS,
332 .size = sizeof(long),
333 .align = sizeof(long),
340 .core_note_type = NT_PRFPREG,
341 .n = sizeof(struct user_fpu_struct) / sizeof(long),
342 .size = sizeof(long),
343 .align = sizeof(long),
346 .active = fpregs_active,
352 .n = sizeof(struct pt_dspregs) / sizeof(long),
353 .size = sizeof(long),
354 .align = sizeof(long),
357 .active = dspregs_active,
/*
 * user_sh_native_view - the regset view exported for this arch,
 * wrapping sh_regsets (the .name field is elided in this chunk).
 */
362 static const struct user_regset_view user_sh_native_view = {
365 .regsets = sh_regsets,
366 .n = ARRAY_SIZE(sh_regsets),
/*
 * task_user_regset_view - there is only one flavour on SH; every task
 * gets the native view regardless of @task.
 */
369 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
371 return &user_sh_native_view;
/*
 * arch_ptrace - SH-specific ptrace request dispatcher.
 *
 * Handles PEEKUSR/POKEUSR against the USER area (pt_regs words, the
 * FPU image in thread.xstate, u_fpvalid, and the PT_TEXT_*/PT_DATA_*
 * pseudo-addresses), plus whole-regset transfers for GP, FPU and DSP
 * registers; everything else falls through to the generic
 * ptrace_request().
 *
 * NOTE(review): braces, several 'break's, local declarations (tmp,
 * index, ret), REGSET_* arguments to the copy_regset_* calls and the
 * EIO error paths are elided in this chunk — confirm against the full
 * file.  In particular 'addr < 0' on an unsigned long is always false
 * in the visible text; treat that as an artifact of the elided
 * context, not a verified behavior.
 */
374 long arch_ptrace(struct task_struct *child, long request,
375 unsigned long addr, unsigned long data)
377 unsigned long __user *datap = (unsigned long __user *)data;
381 /* read the word at location addr in the USER area. */
382 case PTRACE_PEEKUSR: {
386 if ((addr & 3) || addr < 0 ||
387 addr > sizeof(struct user) - 3)
/* Words inside pt_regs come straight off the kernel stack;
 * the FPU range is materialised on demand via init_fpu(). */
390 if (addr < sizeof(struct pt_regs))
391 tmp = get_stack_long(child, addr);
392 else if (addr >= offsetof(struct user, fpu) &&
393 addr < offsetof(struct user, u_fpvalid)) {
394 if (!tsk_used_math(child)) {
395 if (addr == offsetof(struct user, fpu.fpscr))
401 ret = init_fpu(child);
404 index = addr - offsetof(struct user, fpu);
405 tmp = ((unsigned long *)child->thread.xstate)
408 } else if (addr == offsetof(struct user, u_fpvalid))
409 tmp = !!tsk_used_math(child);
/* Pseudo-addresses exposing the text/data layout of child->mm. */
410 else if (addr == PT_TEXT_ADDR)
411 tmp = child->mm->start_code;
412 else if (addr == PT_DATA_ADDR)
413 tmp = child->mm->start_data;
414 else if (addr == PT_TEXT_END_ADDR)
415 tmp = child->mm->end_code;
416 else if (addr == PT_TEXT_LEN)
417 tmp = child->mm->end_code - child->mm->start_code;
420 ret = put_user(tmp, datap);
424 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
426 if ((addr & 3) || addr < 0 ||
427 addr > sizeof(struct user) - 3)
430 if (addr < sizeof(struct pt_regs))
431 ret = put_stack_long(child, addr, data);
432 else if (addr >= offsetof(struct user, fpu) &&
433 addr < offsetof(struct user, u_fpvalid)) {
435 ret = init_fpu(child);
438 index = addr - offsetof(struct user, fpu);
439 set_stopped_child_used_math(child);
440 ((unsigned long *)child->thread.xstate)
443 } else if (addr == offsetof(struct user, u_fpvalid)) {
444 conditional_stopped_child_used_math(data, child);
/* Whole-regset transfers delegate to the regset core using the
 * native view declared above. */
450 return copy_regset_to_user(child, &user_sh_native_view,
452 0, sizeof(struct pt_regs),
455 return copy_regset_from_user(child, &user_sh_native_view,
457 0, sizeof(struct pt_regs),
460 case PTRACE_GETFPREGS:
461 return copy_regset_to_user(child, &user_sh_native_view,
463 0, sizeof(struct user_fpu_struct),
465 case PTRACE_SETFPREGS:
466 return copy_regset_from_user(child, &user_sh_native_view,
468 0, sizeof(struct user_fpu_struct),
472 case PTRACE_GETDSPREGS:
473 return copy_regset_to_user(child, &user_sh_native_view,
475 0, sizeof(struct pt_dspregs),
477 case PTRACE_SETDSPREGS:
478 return copy_regset_from_user(child, &user_sh_native_view,
480 0, sizeof(struct pt_dspregs),
/* Anything not handled above goes to the generic implementation. */
484 ret = ptrace_request(child, request, addr, data);
/*
 * audit_arch - build the AUDIT_ARCH_* value for this kernel: the
 * little-endian flag is OR'd in when CONFIG_CPU_LITTLE_ENDIAN.
 *
 * NOTE(review): the base 'arch' initialisation and return are elided
 * in this chunk — confirm against the full file.
 */
491 static inline int audit_arch(void)
495 #ifdef CONFIG_CPU_LITTLE_ENDIAN
496 arch |= __AUDIT_ARCH_LE;
/*
 * do_syscall_trace_enter - syscall-entry hook called from the entry
 * path when tracing work is pending.
 *
 * Order of operations: seccomp check on the syscall number
 * (regs->regs[0]), tracehook notification (a tracer may veto the call,
 * in which case a bogus number is returned to force ENOSYS while
 * regs->regs[0] keeps the original), syscall tracepoint, then audit
 * entry logging with regs[3] as the number and regs[4..7] as args.
 *
 * Returns the (possibly replaced) syscall number.
 *
 * NOTE(review): the 'ret' declaration and the assignment of the bogus
 * value are elided in this chunk — confirm against the full file.
 */
502 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
506 secure_computing(regs->regs[0]);
508 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
509 tracehook_report_syscall_entry(regs))
511 * Tracing decided this syscall should not happen.
512 * We'll return a bogus call number to get an ENOSYS
513 * error, but leave the original number in regs->regs[0].
517 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
518 trace_sys_enter(regs, regs->regs[0]);
520 if (unlikely(current->audit_context))
521 audit_syscall_entry(audit_arch(), regs->regs[3],
522 regs->regs[4], regs->regs[5],
523 regs->regs[6], regs->regs[7]);
525 return ret ?: regs->regs[0];
/*
 * do_syscall_trace_leave - syscall-exit hook, mirror of the entry
 * path: audit exit logging first (result taken from regs->regs[0]),
 * then the exit tracepoint, then tracehook notification; 'step'
 * reports whether a single-step stop is also due.
 *
 * NOTE(review): the 'int step' declaration and the second argument of
 * audit_syscall_exit() are elided in this chunk — confirm against the
 * full file.
 */
528 asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
532 if (unlikely(current->audit_context))
533 audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
536 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
537 trace_sys_exit(regs, regs->regs[0]);
539 step = test_thread_flag(TIF_SINGLESTEP);
540 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
541 tracehook_report_syscall_exit(regs, step);