// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/capability.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/uio.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>
#include <linux/minmax.h>

#include <asm/syscall.h>	/* for syscall_get_* */
/*
 * Access another process' address space via ptrace.
 * The source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 */
int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
		     void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	if (!tsk->ptrace ||
	    (current != tsk->parent) ||
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptracer_capable(tsk, mm->user_ns))) {
		mmput(mm);
		return 0;
	}

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
	mmput(mm);

	return ret;
}
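/*
 * Illustrative userspace sketch (not part of this file): a PTRACE_PEEKDATA
 * request reaches ptrace_access_vm() via generic_ptrace_peekdata() below.
 * Reading one word from an already-attached, stopped tracee might look
 * like this; error handling is trimmed:
 *
 *	#include <sys/ptrace.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	errno = 0;
 *	long word = ptrace(PTRACE_PEEKDATA, pid, (void *)remote_addr, NULL);
 *	if (word == -1 && errno)
 *		perror("PTRACE_PEEKDATA");
 *
 * Note the errno dance: -1 is a valid word value, so only errno
 * distinguishes failure from data.
 */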
void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
		   const struct cred *ptracer_cred)
{
	BUG_ON(!list_empty(&child->ptrace_entry));
	list_add(&child->ptrace_entry, &new_parent->ptraced);
	child->parent = new_parent;
	child->ptracer_cred = get_cred(ptracer_cred);
}

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
	__ptrace_link(child, new_parent, current_cred());
}
/**
 * __ptrace_unlink - unlink ptracee and restore its execution state
 * @child: ptracee to be unlinked
 *
 * Remove @child from the ptrace list, move it back to the original parent,
 * and restore the execution state so that it conforms to the group stop
 * state.
 *
 * Unlinking can happen via two paths - explicit PTRACE_DETACH or ptracer
 * exiting.  For PTRACE_DETACH, unless the ptracee has been killed between
 * ptrace_check_attach() and here, it's guaranteed to be in TASK_TRACED.
 * If the ptracer is exiting, the ptracee can be in any state.
 *
 * After detach, the ptracee should be in a state which conforms to the
 * group stop.  If the group is stopped or in the process of stopping, the
 * ptracee should be put into TASK_STOPPED; otherwise, it should be woken
 * up from TASK_TRACED.
 *
 * If the ptracee is in TASK_TRACED and needs to be moved to TASK_STOPPED,
 * it goes through TRACED -> RUNNING -> STOPPED transition which is similar
 * to but in the opposite direction of what happens while attaching to a
 * stopped task.  However, in this direction, the intermediate RUNNING
 * state is not hidden even from the current ptracer and if it immediately
 * re-attaches and performs a WNOHANG wait(2), it may fail.
 *
 * CONTEXT:
 * write_lock_irq(tasklist_lock)
 */
void __ptrace_unlink(struct task_struct *child)
{
	const struct cred *old_cred;
	BUG_ON(!child->ptrace);

	clear_task_syscall_work(child, SYSCALL_TRACE);
#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	child->parent = child->real_parent;
	list_del_init(&child->ptrace_entry);
	old_cred = child->ptracer_cred;
	child->ptracer_cred = NULL;
	put_cred(old_cred);

	spin_lock(&child->sighand->siglock);
	child->ptrace = 0;
	/*
	 * Clear all pending traps and TRAPPING.  TRAPPING should be
	 * cleared regardless of JOBCTL_STOP_PENDING.  Do it explicitly.
	 */
	task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
	task_clear_jobctl_trapping(child);

	/*
	 * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
	 * @child isn't dead.
	 */
	if (!(child->flags & PF_EXITING) &&
	    (child->signal->flags & SIGNAL_STOP_STOPPED ||
	     child->signal->group_stop_count)) {
		child->jobctl |= JOBCTL_STOP_PENDING;

		/*
		 * This is only possible if this thread was cloned by the
		 * traced task running in the stopped group, set the signal
		 * for the future reports.
		 * FIXME: we should change ptrace_init_task() to handle this
		 * case.
		 */
		if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
			child->jobctl |= SIGSTOP;
	}

	/*
	 * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
	 * @child in the butt.  Note that @resume should be used iff @child
	 * is in TASK_TRACED; otherwise, we might unduly disrupt
	 * TASK_KILLABLE sleeps.
	 */
	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
		ptrace_signal_wake_up(child, true);

	spin_unlock(&child->sighand->siglock);
}
static bool looks_like_a_spurious_pid(struct task_struct *task)
{
	if (task->exit_code != ((PTRACE_EVENT_EXEC << 8) | SIGTRAP))
		return false;

	if (task_pid_vnr(task) == task->ptrace_message)
		return false;

	/*
	 * The tracee changed its pid but the PTRACE_EVENT_EXEC event
	 * was not wait()'ed, most probably debugger targets the old
	 * leader which was destroyed in de_thread().
	 */
	return true;
}
/*
 * Ensure that nothing can wake it up, even SIGKILL.
 *
 * A task is switched to this state while a ptrace operation is in
 * progress, so that the ptrace operation is uninterruptible.
 */
static bool ptrace_freeze_traced(struct task_struct *task)
{
	bool ret = false;

	/* Lockless, nobody but us can set this flag */
	if (task->jobctl & JOBCTL_LISTENING)
		return ret;

	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
	    !__fatal_signal_pending(task)) {
		task->jobctl |= JOBCTL_PTRACE_FROZEN;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
static void ptrace_unfreeze_traced(struct task_struct *task)
{
	unsigned long flags;

	/*
	 * The child may be awake and may have cleared
	 * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
	 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
	 */
	if (lock_task_sighand(task, &flags)) {
		task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
		if (__fatal_signal_pending(task)) {
			task->jobctl &= ~JOBCTL_TRACED;
			wake_up_state(task, __TASK_TRACED);
		}
		unlock_task_sighand(task, &flags);
	}
}
/**
 * ptrace_check_attach - check whether ptracee is ready for ptrace operation
 * @child: ptracee to check for
 * @ignore_state: don't check whether @child is currently %TASK_TRACED
 *
 * Check whether @child is being ptraced by %current and ready for further
 * ptrace operations.  If @ignore_state is %false, @child also should be in
 * %TASK_TRACED state and on return the child is guaranteed to be traced
 * and not executing.  If @ignore_state is %true, @child can be in any
 * state.
 *
 * CONTEXT:
 * Grabs and releases tasklist_lock and @child->sighand->siglock.
 *
 * RETURNS:
 * 0 on success, -ESRCH if %child is not ready.
 */
static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
		 */
		if (ignore_state || ptrace_freeze_traced(child))
			ret = 0;
	}
	read_unlock(&tasklist_lock);

	if (!ret && !ignore_state &&
	    WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED|TASK_FROZEN)))
		ret = -ESRCH;

	return ret;
}
static bool ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
	if (mode & PTRACE_MODE_NOAUDIT)
		return ns_capable_noaudit(ns, CAP_SYS_PTRACE);
	return ns_capable(ns, CAP_SYS_PTRACE);
}
/* Returns 0 on success, -errno on denial. */
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	const struct cred *cred = current_cred(), *tcred;
	struct mm_struct *mm;
	kuid_t caller_uid;
	kgid_t caller_gid;

	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
		return -EPERM;
	}

	/* May we inspect the given task?
	 * This check is used both for attaching with ptrace
	 * and for allowing access to sensitive information in /proc.
	 *
	 * ptrace_attach denies several cases that /proc allows
	 * because setting up the necessary parent/child relationship
	 * or halting the specified task is impossible.
	 */

	/* Don't let security modules deny introspection */
	if (same_thread_group(task, current))
		return 0;
	rcu_read_lock();
	if (mode & PTRACE_MODE_FSCREDS) {
		caller_uid = cred->fsuid;
		caller_gid = cred->fsgid;
	} else {
		/*
		 * Using the euid would make more sense here, but something
		 * in userland might rely on the old behavior, and this
		 * shouldn't be a security problem since
		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
		 * used a syscall that requests access to another process
		 * (and not a filesystem syscall to procfs).
		 */
		caller_uid = cred->uid;
		caller_gid = cred->gid;
	}
	tcred = __task_cred(task);
	if (uid_eq(caller_uid, tcred->euid) &&
	    uid_eq(caller_uid, tcred->suid) &&
	    uid_eq(caller_uid, tcred->uid)  &&
	    gid_eq(caller_gid, tcred->egid) &&
	    gid_eq(caller_gid, tcred->sgid) &&
	    gid_eq(caller_gid, tcred->gid))
		goto ok;
	if (ptrace_has_cap(tcred->user_ns, mode))
		goto ok;
	rcu_read_unlock();
	return -EPERM;
ok:
	rcu_read_unlock();
	/*
	 * If a task drops privileges and becomes nondumpable (through a syscall
	 * like setresuid()) while we are trying to access it, we must ensure
	 * that the dumpability is read after the credentials; otherwise,
	 * we may be able to attach to a task that we shouldn't be able to
	 * attach to (as if the task had dropped privileges without becoming
	 * nondumpable).
	 * Pairs with a write barrier in commit_creds().
	 */
	smp_rmb();
	mm = task->mm;
	if (mm &&
	    ((get_dumpable(mm) != SUID_DUMP_USER) &&
	     !ptrace_has_cap(mm->user_ns, mode)))
		return -EPERM;

	return security_ptrace_access_check(task, mode);
}

bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
	int err;

	task_lock(task);
	err = __ptrace_may_access(task, mode);
	task_unlock(task);

	return !err;
}
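/*
 * Sketch of a typical in-kernel caller (illustrative only, not a call
 * site in this file): procfs-style code gates access to sensitive
 * per-task data roughly like this, using read-only access checked
 * against filesystem credentials:
 *
 *	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
 *		return -EACCES;
 *
 * The exact PTRACE_MODE_* combination varies by call site; attach paths
 * use PTRACE_MODE_ATTACH_REALCREDS, as ptrace_attach() does below.
 */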
static int check_ptrace_options(unsigned long data)
{
	if (data & ~(unsigned long)PTRACE_O_MASK)
		return -EINVAL;

	if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
		if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
		    !IS_ENABLED(CONFIG_SECCOMP))
			return -EINVAL;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (seccomp_mode(&current->seccomp) != SECCOMP_MODE_DISABLED ||
		    current->ptrace & PT_SUSPEND_SECCOMP)
			return -EPERM;
	}

	return 0;
}
static int ptrace_attach(struct task_struct *task, long request,
			 unsigned long addr,
			 unsigned long flags)
{
	bool seize = (request == PTRACE_SEIZE);
	int retval;

	retval = -EIO;
	if (seize) {
		if (addr != 0)
			goto out;
		/*
		 * This duplicates the check in check_ptrace_options() because
		 * ptrace_attach() and ptrace_setoptions() have historically
		 * used different error codes for unknown ptrace options.
		 */
		if (flags & ~(unsigned long)PTRACE_O_MASK)
			goto out;

		retval = check_ptrace_options(flags);
		if (retval)
			return retval;

		flags = PT_PTRACED | PT_SEIZED | (flags << PT_OPT_FLAG_SHIFT);
	} else {
		flags = PT_PTRACED;
	}

	audit_ptrace(task);

	retval = -EPERM;
	if (unlikely(task->flags & PF_KTHREAD))
		goto out;
	if (same_thread_group(task, current))
		goto out;

	/*
	 * Protect exec's credential calculations against our interference;
	 * SUID, SGID and LSM creds get determined differently
	 * depending on whether a task is getting traced.
	 */
	retval = -ERESTARTNOINTR;
	if (mutex_lock_interruptible(&task->signal->cred_guard_mutex))
		goto out;

	task_lock(task);
	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	task_unlock(task);
	if (retval)
		goto unlock_creds;

	write_lock_irq(&tasklist_lock);
	retval = -EPERM;
	if (unlikely(task->exit_state))
		goto unlock_tasklist;
	if (task->ptrace)
		goto unlock_tasklist;

	task->ptrace = flags;

	ptrace_link(task, current);

	/* SEIZE doesn't trap tracee on attach */
	if (!seize)
		send_sig_info(SIGSTOP, SEND_SIG_PRIV, task);

	spin_lock(&task->sighand->siglock);

	/*
	 * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
	 * TRAPPING, and kick it so that it transits to TRACED.  TRAPPING
	 * will be cleared if the child completes the transition or any
	 * event which clears the group stop states happens.  We'll wait
	 * for the transition to complete before returning from this
	 * function.
	 *
	 * This hides STOPPED -> RUNNING -> TRACED transition from the
	 * attaching thread but a different thread in the same group can
	 * still observe the transient RUNNING state.  IOW, if another
	 * thread's WNOHANG wait(2) on the stopped tracee races against
	 * ATTACH, the wait(2) may fail due to the transient RUNNING.
	 *
	 * The following task_is_stopped() test is safe as both transitions
	 * in and out of STOPPED are protected by siglock.
	 */
	if (task_is_stopped(task) &&
	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_STOPPED;
		signal_wake_up_state(task, __TASK_STOPPED);
	}

	spin_unlock(&task->sighand->siglock);

	retval = 0;
unlock_tasklist:
	write_unlock_irq(&tasklist_lock);
unlock_creds:
	mutex_unlock(&task->signal->cred_guard_mutex);
out:
	if (!retval) {
		/*
		 * We do not bother to change retval or clear JOBCTL_TRAPPING
		 * if wait_on_bit() was interrupted by SIGKILL. The tracer will
		 * not return to user-mode, it will exit and clear this bit in
		 * __ptrace_unlink() if it wasn't already cleared by the tracee;
		 * and until then nobody can ptrace this task.
		 */
		wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT, TASK_KILLABLE);
		proc_ptrace_connector(task, PTRACE_ATTACH);
	}

	return retval;
}
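/*
 * Illustrative userspace sketch (not part of this file): attaching with
 * PTRACE_SEIZE.  Unlike PTRACE_ATTACH, SEIZE does not stop the tracee;
 * addr must be 0 and data carries PTRACE_O_* options directly:
 *
 *	#include <sys/ptrace.h>
 *	#include <stdio.h>
 *
 *	if (ptrace(PTRACE_SEIZE, pid, NULL,
 *		   (void *)(PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEEXIT)) == -1)
 *		perror("PTRACE_SEIZE");
 *
 * The tracee can then be stopped on demand with PTRACE_INTERRUPT and
 * resumed with PTRACE_CONT.
 */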
/**
 * ptrace_traceme  --  helper for PTRACE_TRACEME
 *
 * Performs checks and sets PT_PTRACED.
 * Should be used by all ptrace implementations for PTRACE_TRACEME.
 */
static int ptrace_traceme(void)
{
	int ret = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Are we already being traced? */
	if (!current->ptrace) {
		ret = security_ptrace_traceme(current->parent);
		/*
		 * Check PF_EXITING to ensure ->real_parent has not passed
		 * exit_ptrace(). Otherwise we don't report the error but
		 * pretend ->real_parent untraces us right after return.
		 */
		if (!ret && !(current->real_parent->flags & PF_EXITING)) {
			current->ptrace = PT_PTRACED;
			ptrace_link(current, current->real_parent);
		}
	}
	write_unlock_irq(&tasklist_lock);

	return ret;
}
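/*
 * Illustrative userspace sketch (not part of this file): the classic
 * TRACEME + exec pattern.  The child volunteers to be traced by its
 * parent; the subsequent execve() then stops the child with SIGTRAP
 * before the new program runs:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int status;
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execlp("ls", "ls", (char *)NULL);
 *		_exit(127);
 *	}
 *	waitpid(pid, &status, 0);
 *
 * After the waitpid() the child is stopped and the parent can issue
 * further ptrace requests against it.
 */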
/*
 * Called with irqs disabled, returns true if children should reap themselves.
 */
static int ignoring_children(struct sighand_struct *sigh)
{
	int ret;

	spin_lock(&sigh->siglock);
	ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
	      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
	spin_unlock(&sigh->siglock);

	return ret;
}
/*
 * Called with tasklist_lock held for writing.
 * Unlink a traced task, and clean it up if it was a traced zombie.
 * Return true if it needs to be reaped with release_task().
 * (We can't call release_task() here because we already hold tasklist_lock.)
 *
 * If it's a zombie, our attachedness prevented normal parent notification
 * or self-reaping.  Do notification now if it would have happened earlier.
 * If it should reap itself, return true.
 *
 * If it's our own child, there is no notification to do. But if our normal
 * children self-reap, then this child was prevented by ptrace and we must
 * reap it now, in that case we must also wake up sub-threads sleeping in
 * do_wait().
 */
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
	bool dead;

	__ptrace_unlink(p);

	if (p->exit_state != EXIT_ZOMBIE)
		return false;

	dead = !thread_group_leader(p);

	if (!dead && thread_group_empty(p)) {
		if (!same_thread_group(p->real_parent, tracer))
			dead = do_notify_parent(p, p->exit_signal);
		else if (ignoring_children(tracer->sighand)) {
			__wake_up_parent(p, tracer);
			dead = true;
		}
	}
	/* Mark it as in the process of being reaped. */
	if (dead)
		p->exit_state = EXIT_DEAD;
	return dead;
}
static int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	write_lock_irq(&tasklist_lock);
	/*
	 * We rely on ptrace_freeze_traced(). It can't be killed and
	 * untraced by another thread, it can't be a zombie.
	 */
	WARN_ON(!child->ptrace || child->exit_state);
	/*
	 * tasklist_lock avoids the race with wait_task_stopped(), see
	 * the comment in ptrace_resume().
	 */
	child->exit_code = data;
	__ptrace_detach(current, child);
	write_unlock_irq(&tasklist_lock);

	proc_ptrace_connector(child, PTRACE_DETACH);

	return 0;
}
/*
 * Detach all tasks we were using ptrace on. Called with tasklist held
 * for writing.
 */
void exit_ptrace(struct task_struct *tracer, struct list_head *dead)
{
	struct task_struct *p, *n;

	list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
		if (unlikely(p->ptrace & PT_EXITKILL))
			send_sig_info(SIGKILL, SEND_SIG_PRIV, p);

		if (__ptrace_detach(tracer, p))
			list_add(&p->ptrace_entry, dead);
	}
}
int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);

		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = ptrace_access_vm(tsk, dst, buf, this_len,
				FOLL_FORCE | FOLL_WRITE);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
static int ptrace_setoptions(struct task_struct *child, unsigned long data)
{
	unsigned long flags;
	int ret;

	ret = check_ptrace_options(data);
	if (ret)
		return ret;

	/* Avoid intermediate state when all opts are cleared */
	flags = child->ptrace;
	flags &= ~(PTRACE_O_MASK << PT_OPT_FLAG_SHIFT);
	flags |= (data << PT_OPT_FLAG_SHIFT);
	child->ptrace = flags;

	return 0;
}
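/*
 * Illustrative userspace sketch (not part of this file): options are set
 * on an attached, stopped tracee; unknown bits fail with EINVAL via
 * check_ptrace_options() above:
 *
 *	#include <sys/ptrace.h>
 *	#include <stdio.h>
 *
 *	if (ptrace(PTRACE_SETOPTIONS, pid, NULL,
 *		   (void *)(PTRACE_O_TRACEFORK | PTRACE_O_TRACEEXEC)) == -1)
 *		perror("PTRACE_SETOPTIONS");
 */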
static int ptrace_getsiginfo(struct task_struct *child, kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(info, child->last_siginfo);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, const kernel_siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			copy_siginfo(child->last_siginfo, info);
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
static int ptrace_peek_siginfo(struct task_struct *child,
				unsigned long addr,
				unsigned long data)
{
	struct ptrace_peeksiginfo_args arg;
	struct sigpending *pending;
	struct sigqueue *q;
	int ret, i;

	ret = copy_from_user(&arg, (void __user *) addr,
				sizeof(struct ptrace_peeksiginfo_args));
	if (ret)
		return -EFAULT;

	if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
		return -EINVAL; /* unknown flags */

	if (arg.nr < 0)
		return -EINVAL;

	/* Ensure arg.off fits in an unsigned long */
	if (arg.off > ULONG_MAX)
		return 0;

	if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
		pending = &child->signal->shared_pending;
	else
		pending = &child->pending;

	for (i = 0; i < arg.nr; ) {
		kernel_siginfo_t info;
		unsigned long off = arg.off + i;
		bool found = false;

		spin_lock_irq(&child->sighand->siglock);
		list_for_each_entry(q, &pending->list, list) {
			if (!off--) {
				found = true;
				copy_siginfo(&info, &q->info);
				break;
			}
		}
		spin_unlock_irq(&child->sighand->siglock);

		if (!found) /* beyond the end of the list */
			break;

#ifdef CONFIG_COMPAT
		if (unlikely(in_compat_syscall())) {
			compat_siginfo_t __user *uinfo = compat_ptr(data);

			if (copy_siginfo_to_user32(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}

		} else
#endif
		{
			siginfo_t __user *uinfo = (siginfo_t __user *) data;

			if (copy_siginfo_to_user(uinfo, &info)) {
				ret = -EFAULT;
				break;
			}
		}

		data += sizeof(siginfo_t);
		i++;

		if (signal_pending(current))
			break;

		cond_resched();
	}

	if (i > 0)
		return i;

	return ret;
}
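/*
 * Illustrative userspace sketch (not part of this file): peeking at the
 * first few queued signals of a tracee without dequeueing them.  The
 * return value is the number of siginfo records copied out:
 *
 *	#include <sys/ptrace.h>
 *	#include <linux/ptrace.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	struct ptrace_peeksiginfo_args arg = {
 *		.off = 0, .flags = 0, .nr = 4,
 *	};
 *	siginfo_t infos[4];
 *	long n = ptrace(PTRACE_PEEKSIGINFO, pid, &arg, infos);
 *	if (n < 0)
 *		perror("PTRACE_PEEKSIGINFO");
 *
 * Passing PTRACE_PEEKSIGINFO_SHARED in .flags selects the thread group's
 * shared queue instead of the per-thread queue, matching the code above.
 */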
#ifdef CONFIG_RSEQ
static long ptrace_get_rseq_configuration(struct task_struct *task,
					  unsigned long size, void __user *data)
{
	struct ptrace_rseq_configuration conf = {
		.rseq_abi_pointer = (u64)(uintptr_t)task->rseq,
		.rseq_abi_size = sizeof(*task->rseq),
		.signature = task->rseq_sig,
		.flags = 0,
	};

	size = min_t(unsigned long, size, sizeof(conf));
	if (copy_to_user(data, &conf, size))
		return -EFAULT;
	return sizeof(conf);
}
#endif
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
static int ptrace_resume(struct task_struct *child, long request,
			 unsigned long data)
{
	if (!valid_signal(data))
		return -EIO;

	if (request == PTRACE_SYSCALL)
		set_task_syscall_work(child, SYSCALL_TRACE);
	else
		clear_task_syscall_work(child, SYSCALL_TRACE);

#if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
	if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
		set_task_syscall_work(child, SYSCALL_EMU);
	else
		clear_task_syscall_work(child, SYSCALL_EMU);
#endif

	if (is_singleblock(request)) {
		if (unlikely(!arch_has_block_step()))
			return -EIO;
		user_enable_block_step(child);
	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
		if (unlikely(!arch_has_single_step()))
			return -EIO;
		user_enable_single_step(child);
	} else {
		user_disable_single_step(child);
	}

	/*
	 * Change ->exit_code and ->state under siglock to avoid the race
	 * with wait_task_stopped() in between; a non-zero ->exit_code will
	 * wrongly look like another report from tracee.
	 *
	 * Note that we need siglock even if ->exit_code == data and/or this
	 * status was not reported yet, the new status must not be cleared by
	 * wait_task_stopped() after resume.
	 */
	spin_lock_irq(&child->sighand->siglock);
	child->exit_code = data;
	child->jobctl &= ~JOBCTL_TRACED;
	wake_up_state(child, __TASK_TRACED);
	spin_unlock_irq(&child->sighand->siglock);

	return 0;
}
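/*
 * Illustrative userspace sketch (not part of this file): the canonical
 * PTRACE_SYSCALL loop.  Each resume runs the tracee to the next syscall
 * entry or exit, so stops alternate between the two:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	for (;;) {
 *		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
 *			break;
 *		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
 *			break;
 *		// inspect registers here, e.g. via PTRACE_GET_SYSCALL_INFO
 *	}
 */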
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK

static const struct user_regset *
find_regset(const struct user_regset_view *view, unsigned int type)
{
	const struct user_regset *regset;
	int n;

	for (n = 0; n < view->n; ++n) {
		regset = view->regsets + n;
		if (regset->core_note_type == type)
			return regset;
	}

	return NULL;
}

static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
			 struct iovec *kiov)
{
	const struct user_regset_view *view = task_user_regset_view(task);
	const struct user_regset *regset = find_regset(view, type);
	int regset_no;

	if (!regset || (kiov->iov_len % regset->size) != 0)
		return -EINVAL;

	regset_no = regset - view->regsets;
	kiov->iov_len = min(kiov->iov_len,
			    (__kernel_size_t) (regset->n * regset->size));

	if (req == PTRACE_GETREGSET)
		return copy_regset_to_user(task, view, regset_no, 0,
					   kiov->iov_len, kiov->iov_base);
	else
		return copy_regset_from_user(task, view, regset_no, 0,
					     kiov->iov_len, kiov->iov_base);
}
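/*
 * Illustrative userspace sketch (not part of this file): fetching the
 * general-purpose registers through the regset interface.  NT_PRSTATUS
 * selects the GP regset; iov_len is clamped to the regset size by
 * ptrace_regset() above.  user_regs_struct is architecture-specific:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *	#include <elf.h>
 *	#include <stdio.h>
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == -1)
 *		perror("PTRACE_GETREGSET");
 */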
/*
 * This is declared in linux/regset.h and defined in machine-dependent
 * code.  We put the export here, near the primary machine-neutral use,
 * to ensure no machine forgets it.
 */
EXPORT_SYMBOL_GPL(task_user_regset_view);
static unsigned long
ptrace_get_syscall_info_entry(struct task_struct *child, struct pt_regs *regs,
			      struct ptrace_syscall_info *info)
{
	unsigned long args[ARRAY_SIZE(info->entry.args)];
	int i;

	info->op = PTRACE_SYSCALL_INFO_ENTRY;
	info->entry.nr = syscall_get_nr(child, regs);
	syscall_get_arguments(child, regs, args);
	for (i = 0; i < ARRAY_SIZE(args); i++)
		info->entry.args[i] = args[i];

	/* args is the last field in struct ptrace_syscall_info.entry */
	return offsetofend(struct ptrace_syscall_info, entry.args);
}

static unsigned long
ptrace_get_syscall_info_seccomp(struct task_struct *child, struct pt_regs *regs,
				struct ptrace_syscall_info *info)
{
	/*
	 * As struct ptrace_syscall_info.entry is currently a subset
	 * of struct ptrace_syscall_info.seccomp, it makes sense to
	 * initialize that subset using ptrace_get_syscall_info_entry().
	 * This can be reconsidered in the future if these structures
	 * diverge significantly enough.
	 */
	ptrace_get_syscall_info_entry(child, regs, info);
	info->op = PTRACE_SYSCALL_INFO_SECCOMP;
	info->seccomp.ret_data = child->ptrace_message;

	/* ret_data is the last field in struct ptrace_syscall_info.seccomp */
	return offsetofend(struct ptrace_syscall_info, seccomp.ret_data);
}

static unsigned long
ptrace_get_syscall_info_exit(struct task_struct *child, struct pt_regs *regs,
			     struct ptrace_syscall_info *info)
{
	info->op = PTRACE_SYSCALL_INFO_EXIT;
	info->exit.rval = syscall_get_error(child, regs);
	info->exit.is_error = !!info->exit.rval;
	if (!info->exit.is_error)
		info->exit.rval = syscall_get_return_value(child, regs);

	/* is_error is the last field in struct ptrace_syscall_info.exit */
	return offsetofend(struct ptrace_syscall_info, exit.is_error);
}
static int
ptrace_get_syscall_info(struct task_struct *child, unsigned long user_size,
			void __user *datavp)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct ptrace_syscall_info info = {
		.op = PTRACE_SYSCALL_INFO_NONE,
		.arch = syscall_get_arch(child),
		.instruction_pointer = instruction_pointer(regs),
		.stack_pointer = user_stack_pointer(regs),
	};
	unsigned long actual_size = offsetof(struct ptrace_syscall_info, entry);
	unsigned long write_size;

	/*
	 * This does not need lock_task_sighand() to access
	 * child->last_siginfo because ptrace_freeze_traced()
	 * called earlier by ptrace_check_attach() ensures that
	 * the tracee cannot go away and clear its last_siginfo.
	 */
	switch (child->last_siginfo ? child->last_siginfo->si_code : 0) {
	case SIGTRAP | 0x80:
		switch (child->ptrace_message) {
		case PTRACE_EVENTMSG_SYSCALL_ENTRY:
			actual_size = ptrace_get_syscall_info_entry(child, regs,
								    &info);
			break;
		case PTRACE_EVENTMSG_SYSCALL_EXIT:
			actual_size = ptrace_get_syscall_info_exit(child, regs,
								   &info);
			break;
		}
		break;
	case SIGTRAP | (PTRACE_EVENT_SECCOMP << 8):
		actual_size = ptrace_get_syscall_info_seccomp(child, regs,
							      &info);
		break;
	}

	write_size = min(actual_size, user_size);
	return copy_to_user(datavp, &info, write_size) ? -EFAULT : actual_size;
}
#endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
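/*
 * Illustrative userspace sketch (not part of this file): querying syscall
 * stop details.  addr carries the caller's buffer size; the return value
 * is the number of bytes the kernel had available to write, so a value
 * larger than sizeof(info) indicates a newer kernel with a bigger
 * structure:
 *
 *	#include <sys/ptrace.h>
 *	#include <linux/ptrace.h>
 *
 *	struct ptrace_syscall_info info;
 *	long sz = ptrace(PTRACE_GET_SYSCALL_INFO, pid,
 *			 (void *)sizeof(info), &info);
 *	if (sz > 0 && info.op == PTRACE_SYSCALL_INFO_ENTRY)
 *		handle_entry(info.entry.nr, info.entry.args);
 *
 * handle_entry() is a hypothetical callback standing in for the caller's
 * own dispatch logic.
 */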
int ptrace_request(struct task_struct *child, long request,
		   unsigned long addr, unsigned long data)
{
	bool seized = child->ptrace & PT_SEIZED;
	int ret = -EIO;
	kernel_siginfo_t siginfo, *si;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;
	unsigned long flags;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		return generic_ptrace_peekdata(child, addr, data);
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		return generic_ptrace_pokedata(child, addr, data);

#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, datalp);
		break;

	case PTRACE_PEEKSIGINFO:
		ret = ptrace_peek_siginfo(child, addr, data);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user(datavp, &siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user(&siginfo, datavp);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;

	case PTRACE_GETSIGMASK: {
		sigset_t *mask;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (test_tsk_restore_sigmask(child))
			mask = &child->saved_sigmask;
		else
			mask = &child->blocked;

		if (copy_to_user(datavp, mask, sizeof(sigset_t)))
			ret = -EFAULT;
		else
			ret = 0;

		break;
	}

	case PTRACE_SETSIGMASK: {
		sigset_t new_set;

		if (addr != sizeof(sigset_t)) {
			ret = -EINVAL;
			break;
		}

		if (copy_from_user(&new_set, datavp, sizeof(sigset_t))) {
			ret = -EFAULT;
			break;
		}

		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		/*
		 * Every thread does recalc_sigpending() after resume, so
		 * retarget_shared_pending() and recalc_sigpending() are not
		 * called here.
		 */
		spin_lock_irq(&child->sighand->siglock);
		child->blocked = new_set;
		spin_unlock_irq(&child->sighand->siglock);

		clear_tsk_restore_sigmask(child);

		ret = 0;
		break;
	}

	case PTRACE_INTERRUPT:
		/*
		 * Stop tracee without any side-effect on signal or job
		 * control.  At least one trap is guaranteed to happen
		 * after this request.  If @child is already trapped, the
		 * current trap is not disturbed and another trap will
		 * happen after the current trap is ended with PTRACE_CONT.
		 *
		 * The actual trap might not be PTRACE_EVENT_STOP trap but
		 * the pending condition is cleared regardless.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		/*
		 * INTERRUPT doesn't disturb existing trap sans one
		 * exception.  If ptracer issued LISTEN for the current
		 * STOP, this INTERRUPT should clear LISTEN and re-trap
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);

		unlock_task_sighand(child, &flags);
		ret = 0;
		break;

	case PTRACE_LISTEN:
		/*
		 * Listen for events.  Tracee must be in STOP.  It's not
		 * resumed per-se but is not considered to be in TRACED by
		 * wait(2) or ptrace(2).  If an async event (e.g. group
		 * stop state change) happens, tracee will enter STOP trap
		 * again.  Alternatively, ptracer can issue INTERRUPT to
		 * finish listening and re-trap tracee into STOP.
		 */
		if (unlikely(!seized || !lock_task_sighand(child, &flags)))
			break;

		si = child->last_siginfo;
		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
			child->jobctl |= JOBCTL_LISTENING;
			/*
			 * If NOTIFY is set, it means event happened between
			 * start of this trap and now.  Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);
		break;

	case PTRACE_DETACH:	 /* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;

#ifdef CONFIG_BINFMT_ELF_FDPIC
	case PTRACE_GETFDPIC: {
		struct mm_struct *mm = get_task_mm(child);
		unsigned long tmp = 0;

		ret = -ESRCH;
		if (!mm)
			break;

		switch (addr) {
		case PTRACE_GETFDPIC_EXEC:
			tmp = mm->context.exec_fdpic_loadmap;
			break;
		case PTRACE_GETFDPIC_INTERP:
			tmp = mm->context.interp_fdpic_loadmap;
			break;
		default:
			break;
		}
		mmput(mm);

		ret = put_user(tmp, datalp);
		break;
	}
#endif

	case PTRACE_SINGLESTEP:
#ifdef PTRACE_SINGLEBLOCK
	case PTRACE_SINGLEBLOCK:
#endif
#ifdef PTRACE_SYSEMU
	case PTRACE_SYSEMU:
	case PTRACE_SYSEMU_SINGLESTEP:
#endif
	case PTRACE_SYSCALL:
	case PTRACE_CONT:
		return ptrace_resume(child, request, data);

	case PTRACE_KILL:
		send_sig_info(SIGKILL, SEND_SIG_NOINFO, child);
		return 0;

#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET: {
		struct iovec kiov;
		struct iovec __user *uiov = datavp;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(kiov.iov_base, &uiov->iov_base) ||
		    __get_user(kiov.iov_len, &uiov->iov_len))
			return -EFAULT;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}

	case PTRACE_GET_SYSCALL_INFO:
		ret = ptrace_get_syscall_info(child, addr, datavp);
		break;
#endif

	case PTRACE_SECCOMP_GET_FILTER:
		ret = seccomp_get_filter(child, addr, datavp);
		break;

	case PTRACE_SECCOMP_GET_METADATA:
		ret = seccomp_get_metadata(child, addr, datavp);
		break;

#ifdef CONFIG_RSEQ
	case PTRACE_GET_RSEQ_CONFIGURATION:
		ret = ptrace_get_rseq_configuration(child, addr, datavp);
		break;
#endif

	default:
		break;
	}

	return ret;
}
SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		unsigned long, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret || request != PTRACE_DETACH)
		ptrace_unfreeze_traced(child);

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	unsigned long tmp;
	int copied;

	copied = ptrace_access_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
	if (copied != sizeof(tmp))
		return -EIO;
	return put_user(tmp, (unsigned long __user *)data);
}

int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data)
{
	int copied;

	copied = ptrace_access_vm(tsk, addr, &data, sizeof(data),
			FOLL_FORCE | FOLL_WRITE);
	return (copied == sizeof(data)) ? 0 : -EIO;
}
#if defined CONFIG_COMPAT

int compat_ptrace_request(struct task_struct *child, compat_long_t request,
			  compat_ulong_t addr, compat_ulong_t data)
{
	compat_ulong_t __user *datap = compat_ptr(data);
	compat_ulong_t word;
	kernel_siginfo_t siginfo;
	int ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = ptrace_access_vm(child, addr, &word, sizeof(word),
				FOLL_FORCE);
		if (ret != sizeof(word))
			ret = -EIO;
		else
			ret = put_user(word, datap);
		break;

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE | FOLL_WRITE);
		ret = (ret != sizeof(data) ? -EIO : 0);
		break;

	case PTRACE_GETEVENTMSG:
		ret = put_user((compat_ulong_t) child->ptrace_message, datap);
		break;

	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, &siginfo);
		if (!ret)
			ret = copy_siginfo_to_user32(
				(struct compat_siginfo __user *) datap,
				&siginfo);
		break;

	case PTRACE_SETSIGINFO:
		ret = copy_siginfo_from_user32(
			&siginfo, (struct compat_siginfo __user *) datap);
		if (!ret)
			ret = ptrace_setsiginfo(child, &siginfo);
		break;
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	case PTRACE_GETREGSET:
	case PTRACE_SETREGSET:
	{
		struct iovec kiov;
		struct compat_iovec __user *uiov =
			(struct compat_iovec __user *) datap;
		compat_uptr_t ptr;
		compat_size_t len;

		if (!access_ok(uiov, sizeof(*uiov)))
			return -EFAULT;

		if (__get_user(ptr, &uiov->iov_base) ||
		    __get_user(len, &uiov->iov_len))
			return -EFAULT;

		kiov.iov_base = compat_ptr(ptr);
		kiov.iov_len = len;

		ret = ptrace_regset(child, request, addr, &kiov);
		if (!ret)
			ret = __put_user(kiov.iov_len, &uiov->iov_len);
		break;
	}
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
	}

	return ret;
}
COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		       compat_long_t, addr, compat_long_t, data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();
		goto out;
	}

	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

	if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
		ret = ptrace_attach(child, request, addr, data);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL ||
				  request == PTRACE_INTERRUPT);
	if (!ret) {
		ret = compat_arch_ptrace(child, request, addr, data);
		if (ret || request != PTRACE_DETACH)
			ptrace_unfreeze_traced(child);
	}

 out_put_task_struct:
	put_task_struct(child);
 out:
	return ret;
}
#endif	/* CONFIG_COMPAT */