1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2018 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
28 #include <sys/syscall.h>
29 #include "nat/gdb_ptrace.h"
30 #include "linux-nat.h"
31 #include "nat/linux-ptrace.h"
32 #include "nat/linux-procfs.h"
33 #include "nat/linux-personality.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include <sys/stat.h> /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
50 #include "event-loop.h"
51 #include "event-top.h"
53 #include <sys/types.h>
55 #include "xml-support.h"
58 #include "nat/linux-osdata.h"
59 #include "linux-tdep.h"
62 #include "tracepoint.h"
64 #include "target-descriptions.h"
65 #include "filestuff.h"
67 #include "nat/linux-namespaces.h"
71 #define SPUFS_MAGIC 0x23c9b64e
74 /* This comment documents the high-level logic of this file.
76 Waiting for events in sync mode
77 ===============================
79 When waiting for an event in a specific thread, we just use waitpid,
80 passing the specific pid, and not passing WNOHANG.
82 When waiting for an event in all threads, waitpid is not quite good:
84 - If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
89 - When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
93 The solution is to always use -1 and WNOHANG, together with
96 First, we use non-blocking waitpid to check for events. If nothing is
97 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98 it means something happened to a child process. As soon as we know
99 there's an event, we get back to calling nonblocking waitpid.
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend
102 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
103 when it's blocked, the signal becomes pending and sigsuspend
104 immediately notices it and returns.
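   As an illustrative sketch (not the literal code used elsewhere in this
   file; chld_mask below is a placeholder name), the sync-mode loop
   therefore amounts to roughly:

     sigprocmask (SIG_BLOCK, &chld_mask, &suspend_mask);  -- block SIGCHLD
     for (;;)
       {
         pid = waitpid (-1, &status, __WALL | WNOHANG);
         if (pid > 0)
           break;                       -- got an event, go process it
         sigsuspend (&suspend_mask);    -- sleep until SIGCHLD arrives
       }

   where suspend_mask is the mask that was in effect before SIGCHLD was
   blocked.  If SIGCHLD arrives between the waitpid and the sigsuspend,
   it stays pending (because it is blocked), so sigsuspend returns at
   once and no event is lost.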
106 Waiting for events in async mode (TARGET_WNOHANG)
107 =================================================
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, the self-pipe trick is used
115 --- a pipe is registered as a waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well as on
117 other event sources, e.g., stdin), and the SIGCHLD handler writes a
118 byte to this pipe. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
123 Obviously, if we fail to notify the event loop when there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return an event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
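   As a sketch only (the real pipe and handler live further down in this
   file -- see linux_nat_event_pipe and the async support code; the names
   below are just for illustration), the self-pipe trick looks roughly
   like:

     static int event_pipe[2];          -- both ends set O_NONBLOCK

     static void
     sigchld_handler (int signo)
     {
       int saved_errno = errno;
       while (write (event_pipe[1], "+", 1) < 0 && errno == EINTR)
         ;                              -- one byte is enough; EAGAIN
                                           (pipe full) is fine to ignore
       errno = saved_errno;
     }

   The event loop select/poll's on event_pipe[0]; when it becomes
   readable, GDB calls back into this file, does the non-blocking waitpid
   dance described above, and drains the pipe.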
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that we never block in
136 While processing an event, we may end up momentarily blocked in
137 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
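   In sketch form (illustrative only), stopping one LWP and later
   resuming it without ever letting the SIGSTOP be delivered looks like:

     syscall (__NR_tkill, lwpid, SIGSTOP);   -- ask the LWP to stop
     waitpid (lwpid, &status, __WALL);       -- collect the stop;
                                                WSTOPSIG (status) == SIGSTOP
     ptrace (PTRACE_CONT, lwpid, 0, 0);      -- resume with signal 0,
                                                i.e. cancel the SIGSTOP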
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
171 The case of a thread group (process) with 3 or more threads, where a
172 thread other than the leader execs, is worth detailing:
174 On an exec, the Linux kernel destroys all threads except the execing
175 one in the thread group, and resets the execing thread's tid to the
176 tgid. No exit notification is sent for the execing thread -- from the
177 ptracer's perspective, it appears as though the execing thread just
178 vanishes. Until we reap all other threads except the leader and the
179 execing thread, the leader will be zombie, and the execing thread will
180 be in `D (disc sleep)' state. As soon as all other threads are
181 reaped, the execing thread changes its tid to the tgid, and the
182 previous (zombie) leader vanishes, giving place to the "new"
186 #define O_LARGEFILE 0
189 struct linux_nat_target *linux_target;
191 /* Does the current host support PTRACE_GETREGSET? */
192 enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
194 /* The method to call, if any, when a new thread is attached. */
195 static void (*linux_nat_new_thread) (struct lwp_info *);
197 /* The method to call, if any, when a thread is destroyed. */
198 static void (*linux_nat_delete_thread) (struct arch_lwp_info *);
200 /* The method to call, if any, when a new fork is attached. */
201 static linux_nat_new_fork_ftype *linux_nat_new_fork;
203 /* The method to call, if any, when a process is no longer
205 static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
207 /* Hook to call prior to resuming a thread. */
208 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
210 /* The method to call, if any, when the siginfo object needs to be
211 converted between the layout returned by ptrace, and the layout in
212 the architecture of the inferior. */
213 static int (*linux_nat_siginfo_fixup) (siginfo_t *,
217 /* The saved to_close method, inherited from inf-ptrace.c.
218 Called by our to_close. */
219 static void (*super_close) (struct target_ops *);
221 static unsigned int debug_linux_nat;
223 show_debug_linux_nat (struct ui_file *file, int from_tty,
224 struct cmd_list_element *c, const char *value)
226 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
230 struct simple_pid_list
234 struct simple_pid_list *next;
236 struct simple_pid_list *stopped_pids;
238 /* Whether target_thread_events is in effect. */
239 static int report_thread_events;
241 /* Async mode support. */
243 /* The read/write ends of the pipe registered as waitable file in the
245 static int linux_nat_event_pipe[2] = { -1, -1 };
247 /* True if we're currently in async mode. */
248 #define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
250 /* Flush the event pipe. */
253 async_file_flush (void)
260 ret = read (linux_nat_event_pipe[0], &buf, 1);
262 while (ret >= 0 || (ret == -1 && errno == EINTR));
265 /* Put something (anything, doesn't matter what, or how much) in the event
266 pipe, so that the select/poll in the event-loop realizes we have
267 something to process. */
270 async_file_mark (void)
274 /* It doesn't really matter what the pipe contains, as long as we end
275 up with something in it. Might as well flush the previous
281 ret = write (linux_nat_event_pipe[1], "+", 1);
283 while (ret == -1 && errno == EINTR);
285 /* Ignore EAGAIN. If the pipe is full, the event loop will already
286 be awakened anyway. */
289 static int kill_lwp (int lwpid, int signo);
291 static int stop_callback (struct lwp_info *lp, void *data);
292 static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
294 static void block_child_signals (sigset_t *prev_mask);
295 static void restore_child_signals_mask (sigset_t *prev_mask);
298 static struct lwp_info *add_lwp (ptid_t ptid);
299 static void purge_lwp_list (int pid);
300 static void delete_lwp (ptid_t ptid);
301 static struct lwp_info *find_lwp_pid (ptid_t ptid);
303 static int lwp_status_pending_p (struct lwp_info *lp);
305 static int sigtrap_is_event (int status);
306 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
308 static void save_stop_reason (struct lwp_info *lp);
313 /* See nat/linux-nat.h. */
316 ptid_of_lwp (struct lwp_info *lwp)
321 /* See nat/linux-nat.h. */
324 lwp_set_arch_private_info (struct lwp_info *lwp,
325 struct arch_lwp_info *info)
327 lwp->arch_private = info;
330 /* See nat/linux-nat.h. */
332 struct arch_lwp_info *
333 lwp_arch_private_info (struct lwp_info *lwp)
335 return lwp->arch_private;
338 /* See nat/linux-nat.h. */
341 lwp_is_stopped (struct lwp_info *lwp)
346 /* See nat/linux-nat.h. */
348 enum target_stop_reason
349 lwp_stop_reason (struct lwp_info *lwp)
351 return lwp->stop_reason;
354 /* See nat/linux-nat.h. */
357 lwp_is_stepping (struct lwp_info *lwp)
363 /* Trivial list manipulation functions to keep track of a list of
364 new stopped processes. */
366 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
368 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
371 new_pid->status = status;
372 new_pid->next = *listp;
377 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
379 struct simple_pid_list **p;
381 for (p = listp; *p != NULL; p = &(*p)->next)
382 if ((*p)->pid == pid)
384 struct simple_pid_list *next = (*p)->next;
386 *statusp = (*p)->status;
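/* Usage sketch (illustrative, not a new interface): when a stop
   arrives for a pid we do not know about yet -- typically the initial
   SIGSTOP of a clone child whose PTRACE_EVENT_CLONE we have not
   processed yet -- the status is stashed with

     add_to_pid_list (&stopped_pids, new_pid, status);

   and later claimed by the extended-event code with

     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       ...

   See linux_handle_extended_wait below for the consumer side.  */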
394 /* Return the ptrace options that we want to try to enable. */
397 linux_nat_ptrace_options (int attached)
402 options |= PTRACE_O_EXITKILL;
404 options |= (PTRACE_O_TRACESYSGOOD
405 | PTRACE_O_TRACEVFORKDONE
406 | PTRACE_O_TRACEVFORK
408 | PTRACE_O_TRACEEXEC);
413 /* Initialize ptrace warnings and check for supported ptrace
416 ATTACHED should be nonzero iff we attached to the inferior. */
419 linux_init_ptrace (pid_t pid, int attached)
421 int options = linux_nat_ptrace_options (attached);
423 linux_enable_event_reporting (pid, options);
424 linux_ptrace_init_warnings ();
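/* For reference, the event reporting enabled above ultimately boils
   down to a PTRACE_SETOPTIONS request on the stopped tracee, roughly
   (illustrative sketch; the real call is made by
   linux_enable_event_reporting in nat/linux-ptrace.c, which also takes
   into account which options the running kernel actually supports):

     ptrace (PTRACE_SETOPTIONS, pid, 0, options);

   After that, fork/vfork/clone/exec and syscall stops show up as the
   extended ptrace events handled further down in this file.  */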
427 linux_nat_target::~linux_nat_target ()
431 linux_nat_target::post_attach (int pid)
433 linux_init_ptrace (pid, 1);
437 linux_nat_target::post_startup_inferior (ptid_t ptid)
439 linux_init_ptrace (ptid_get_pid (ptid), 0);
442 /* Return the number of known LWPs in the tgid given by PID. */
450 for (lp = lwp_list; lp; lp = lp->next)
451 if (ptid_get_pid (lp->ptid) == pid)
457 /* Call delete_lwp with prototype compatible for make_cleanup. */
460 delete_lwp_cleanup (void *lp_voidp)
462 struct lwp_info *lp = (struct lwp_info *) lp_voidp;
464 delete_lwp (lp->ptid);
467 /* Target hook for follow_fork. On entry inferior_ptid must be the
468 ptid of the followed inferior. At return, inferior_ptid will be
472 linux_nat_target::follow_fork (int follow_child, int detach_fork)
476 struct lwp_info *child_lp = NULL;
477 int status = W_STOPCODE (0);
479 ptid_t parent_ptid, child_ptid;
480 int parent_pid, child_pid;
482 has_vforked = (inferior_thread ()->pending_follow.kind
483 == TARGET_WAITKIND_VFORKED);
484 parent_ptid = inferior_ptid;
485 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
486 parent_pid = ptid_get_lwp (parent_ptid);
487 child_pid = ptid_get_lwp (child_ptid);
489 /* We're already attached to the parent, by default. */
490 child_lp = add_lwp (child_ptid);
491 child_lp->stopped = 1;
492 child_lp->last_resume_kind = resume_stop;
494 /* Detach new forked process? */
497 struct cleanup *old_chain = make_cleanup (delete_lwp_cleanup,
500 if (linux_nat_prepare_to_resume != NULL)
501 linux_nat_prepare_to_resume (child_lp);
503 /* When debugging an inferior in an architecture that supports
504 hardware single stepping on a kernel without commit
505 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
506 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
507 set if the parent process had them set.
508 To work around this, single step the child process
509 once before detaching to clear the flags. */
511 /* Note that we consult the parent's architecture instead of
512 the child's because there's no inferior for the child at
514 if (!gdbarch_software_single_step_p (target_thread_architecture
517 linux_disable_event_reporting (child_pid);
518 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
519 perror_with_name (_("Couldn't do single step"));
520 if (my_waitpid (child_pid, &status, 0) < 0)
521 perror_with_name (_("Couldn't wait vfork process"));
524 if (WIFSTOPPED (status))
528 signo = WSTOPSIG (status);
530 && !signal_pass_state (gdb_signal_from_host (signo)))
532 ptrace (PTRACE_DETACH, child_pid, 0, signo);
535 do_cleanups (old_chain);
539 scoped_restore save_inferior_ptid
540 = make_scoped_restore (&inferior_ptid);
541 inferior_ptid = child_ptid;
543 /* Let the thread_db layer learn about this new process. */
544 check_for_thread_db ();
549 struct lwp_info *parent_lp;
551 parent_lp = find_lwp_pid (parent_ptid);
552 gdb_assert (linux_supports_tracefork () >= 0);
554 if (linux_supports_tracevforkdone ())
557 fprintf_unfiltered (gdb_stdlog,
558 "LCFF: waiting for VFORK_DONE on %d\n",
560 parent_lp->stopped = 1;
562 /* We'll handle the VFORK_DONE event like any other
563 event, in target_wait. */
567 /* We can't insert breakpoints until the child has
568 finished with the shared memory region. We need to
569 wait until that happens. Ideal would be to just
571 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
572 - waitpid (parent_pid, &status, __WALL);
573 However, most architectures can't handle a syscall
574 being traced on the way out if it wasn't traced on
577 We might also think to loop, continuing the child
578 until it exits or gets a SIGTRAP. One problem is
579 that the child might call ptrace with PTRACE_TRACEME.
581 There's no simple and reliable way to figure out when
582 the vforked child will be done with its copy of the
583 shared memory. We could step it out of the syscall,
584 two instructions, let it go, and then single-step the
585 parent once. When we have hardware single-step, this
586 would work; with software single-step it could still
587 be made to work but we'd have to be able to insert
588 single-step breakpoints in the child, and we'd have
589 to insert -just- the single-step breakpoint in the
590 parent. Very awkward.
592 In the end, the best we can do is to make sure it
593 runs for a little while. Hopefully it will be out of
594 range of any breakpoints we reinsert. Usually this
595 is only the single-step breakpoint at vfork's return
599 fprintf_unfiltered (gdb_stdlog,
600 "LCFF: no VFORK_DONE "
601 "support, sleeping a bit\n");
605 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
606 and leave it pending. The next linux_nat_resume call
607 will notice a pending event, and bypass actually
608 resuming the inferior. */
609 parent_lp->status = 0;
610 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
611 parent_lp->stopped = 1;
613 /* If we're in async mode, need to tell the event loop
614 there's something here to process. */
615 if (target_is_async_p ())
622 struct lwp_info *child_lp;
624 child_lp = add_lwp (inferior_ptid);
625 child_lp->stopped = 1;
626 child_lp->last_resume_kind = resume_stop;
628 /* Let the thread_db layer learn about this new process. */
629 check_for_thread_db ();
637 linux_nat_target::insert_fork_catchpoint (int pid)
639 return !linux_supports_tracefork ();
643 linux_nat_target::remove_fork_catchpoint (int pid)
649 linux_nat_target::insert_vfork_catchpoint (int pid)
651 return !linux_supports_tracefork ();
655 linux_nat_target::remove_vfork_catchpoint (int pid)
661 linux_nat_target::insert_exec_catchpoint (int pid)
663 return !linux_supports_tracefork ();
667 linux_nat_target::remove_exec_catchpoint (int pid)
673 linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
674 gdb::array_view<const int> syscall_counts)
676 if (!linux_supports_tracesysgood ())
679 /* On GNU/Linux, we ignore the arguments. It means that we only
680 enable the syscall catchpoints, but do not disable them.
682 Also, we do not use the `syscall_counts' information because we do not
683 filter system calls here. We let GDB do the logic for us. */
687 /* List of known LWPs, keyed by LWP PID. This speeds up the common
688 case of mapping a PID returned from the kernel to our corresponding
689 lwp_info data structure. */
690 static htab_t lwp_lwpid_htab;
692 /* Calculate a hash from a lwp_info's LWP PID. */
695 lwp_info_hash (const void *ap)
697 const struct lwp_info *lp = (struct lwp_info *) ap;
698 pid_t pid = ptid_get_lwp (lp->ptid);
700 return iterative_hash_object (pid, 0);
703 /* Equality function for the lwp_info hash table. Compares the LWP's
707 lwp_lwpid_htab_eq (const void *a, const void *b)
709 const struct lwp_info *entry = (const struct lwp_info *) a;
710 const struct lwp_info *element = (const struct lwp_info *) b;
712 return ptid_get_lwp (entry->ptid) == ptid_get_lwp (element->ptid);
715 /* Create the lwp_lwpid_htab hash table. */
718 lwp_lwpid_htab_create (void)
720 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
723 /* Add LP to the hash table. */
726 lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
730 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
731 gdb_assert (slot != NULL && *slot == NULL);
735 /* Head of doubly-linked list of known LWPs. Sorted by reverse
736 creation order. This order is assumed in some cases. E.g.,
737 reaping status after killing all lwps of a process: the leader LWP
738 must be reaped last. */
739 struct lwp_info *lwp_list;
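/* In sketch form, adding at the head (newest first) is the usual
   doubly-linked push-front; illustrative only, the real code follows
   just below:

     lp->prev = NULL;
     lp->next = lwp_list;
     if (lwp_list != NULL)
       lwp_list->prev = lp;
     lwp_list = lp;  */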
741 /* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
744 lwp_list_add (struct lwp_info *lp)
747 if (lwp_list != NULL)
752 /* Remove LP from sorted-by-reverse-creation-order doubly-linked
756 lwp_list_remove (struct lwp_info *lp)
758 /* Remove from sorted-by-creation-order list. */
759 if (lp->next != NULL)
760 lp->next->prev = lp->prev;
761 if (lp->prev != NULL)
762 lp->prev->next = lp->next;
769 /* Original signal mask. */
770 static sigset_t normal_mask;
772 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
773 _initialize_linux_nat. */
774 static sigset_t suspend_mask;
776 /* Signals to block to make sigsuspend work. */
777 static sigset_t blocked_mask;
779 /* SIGCHLD action. */
780 struct sigaction sigchld_action;
782 /* Block child signals (SIGCHLD and linux threads signals), and store
783 the previous mask in PREV_MASK. */
786 block_child_signals (sigset_t *prev_mask)
788 /* Make sure SIGCHLD is blocked. */
789 if (!sigismember (&blocked_mask, SIGCHLD))
790 sigaddset (&blocked_mask, SIGCHLD);
792 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
795 /* Restore child signals mask, previously returned by
796 block_child_signals. */
799 restore_child_signals_mask (sigset_t *prev_mask)
801 sigprocmask (SIG_SETMASK, prev_mask, NULL);
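/* Typical bracketing with the two helpers above (illustrative):

     sigset_t prev_mask;

     block_child_signals (&prev_mask);
     ... waitpid / sigsuspend work that must not race with SIGCHLD ...
     restore_child_signals_mask (&prev_mask);

   This is the pattern used by wait_lwp further down in this file.  */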
804 /* Mask of signals to pass directly to the inferior. */
805 static sigset_t pass_mask;
807 /* Update signals to pass to the inferior. */
809 linux_nat_target::pass_signals (int numsigs, unsigned char *pass_signals)
813 sigemptyset (&pass_mask);
815 for (signo = 1; signo < NSIG; signo++)
817 int target_signo = gdb_signal_from_host (signo);
818 if (target_signo < numsigs && pass_signals[target_signo])
819 sigaddset (&pass_mask, signo);
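/* The mask built above is consulted when a stopped LWP is about to be
   resumed; in sketch form (illustrative):

     if (WIFSTOPPED (lp->status)
         && sigismember (&pass_mask, WSTOPSIG (lp->status)))
       ... pass the signal straight back to the LWP on resume instead
           of short-circuiting and reporting it to the core ...

   See the corresponding check in linux_nat_target::resume below.  */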
825 /* Prototypes for local functions. */
826 static int stop_wait_callback (struct lwp_info *lp, void *data);
827 static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
828 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
832 /* Destroy and free LP. */
835 lwp_free (struct lwp_info *lp)
837 /* Let the arch specific bits release arch_lwp_info. */
838 if (linux_nat_delete_thread != NULL)
839 linux_nat_delete_thread (lp->arch_private);
841 gdb_assert (lp->arch_private == NULL);
846 /* Traversal function for purge_lwp_list. */
849 lwp_lwpid_htab_remove_pid (void **slot, void *info)
851 struct lwp_info *lp = (struct lwp_info *) *slot;
852 int pid = *(int *) info;
854 if (ptid_get_pid (lp->ptid) == pid)
856 htab_clear_slot (lwp_lwpid_htab, slot);
857 lwp_list_remove (lp);
864 /* Remove all LWPs belonging to PID from the lwp list. */
867 purge_lwp_list (int pid)
869 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
872 /* Add the LWP specified by PTID to the list. PTID is the first LWP
873 in the process. Return a pointer to the structure describing the
876 This differs from add_lwp in that we don't let the arch specific
877 bits know about this new thread. Current clients of this callback
878 take the opportunity to install watchpoints in the new thread, and
879 we shouldn't do that for the first thread. If we're spawning a
880 child ("run"), the thread executes the shell wrapper first, and we
881 shouldn't touch it until it execs the program we want to debug.
882 For "attach", it'd be okay to call the callback, but it's not
883 necessary, because watchpoints can't yet have been inserted into
886 static struct lwp_info *
887 add_initial_lwp (ptid_t ptid)
891 gdb_assert (ptid_lwp_p (ptid));
893 lp = XNEW (struct lwp_info);
895 memset (lp, 0, sizeof (struct lwp_info));
897 lp->last_resume_kind = resume_continue;
898 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
903 /* Add to sorted-by-reverse-creation-order list. */
906 /* Add to keyed-by-pid htab. */
907 lwp_lwpid_htab_add_lwp (lp);
912 /* Add the LWP specified by PID to the list. Return a pointer to the
913 structure describing the new LWP. The LWP should already be
916 static struct lwp_info *
917 add_lwp (ptid_t ptid)
921 lp = add_initial_lwp (ptid);
923 /* Let the arch specific bits know about this new thread. Current
924 clients of this callback take the opportunity to install
925 watchpoints in the new thread. We don't do this for the first
926 thread though. See add_initial_lwp. */
927 if (linux_nat_new_thread != NULL)
928 linux_nat_new_thread (lp);
933 /* Remove the LWP specified by PID from the list. */
936 delete_lwp (ptid_t ptid)
940 struct lwp_info dummy;
943 slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
947 lp = *(struct lwp_info **) slot;
948 gdb_assert (lp != NULL);
950 htab_clear_slot (lwp_lwpid_htab, slot);
952 /* Remove from sorted-by-creation-order list. */
953 lwp_list_remove (lp);
959 /* Return a pointer to the structure describing the LWP corresponding
960 to PID. If no corresponding LWP could be found, return NULL. */
962 static struct lwp_info *
963 find_lwp_pid (ptid_t ptid)
967 struct lwp_info dummy;
969 if (ptid_lwp_p (ptid))
970 lwp = ptid_get_lwp (ptid);
972 lwp = ptid_get_pid (ptid);
974 dummy.ptid = ptid_build (0, lwp, 0);
975 lp = (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
979 /* See nat/linux-nat.h. */
982 iterate_over_lwps (ptid_t filter,
983 iterate_over_lwps_ftype callback,
986 struct lwp_info *lp, *lpnext;
988 for (lp = lwp_list; lp; lp = lpnext)
992 if (ptid_match (lp->ptid, filter))
994 if ((*callback) (lp, data) != 0)
1002 /* Update our internal state when changing from one checkpoint to
1003 another indicated by NEW_PTID. We can only switch single-threaded
1004 applications, so we only create one new LWP, and the previous list
1008 linux_nat_switch_fork (ptid_t new_ptid)
1010 struct lwp_info *lp;
1012 purge_lwp_list (ptid_get_pid (inferior_ptid));
1014 lp = add_lwp (new_ptid);
1017 /* This changes the thread's ptid while preserving the gdb thread
1018 num. Also changes the inferior pid, while preserving the
1020 thread_change_ptid (inferior_ptid, new_ptid);
1022 /* We've just told GDB core that the thread changed target id, but,
1023 in fact, it really is a different thread, with different register
1025 registers_changed ();
1028 /* Handle the exit of a single thread LP. */
1031 exit_lwp (struct lwp_info *lp)
1033 struct thread_info *th = find_thread_ptid (lp->ptid);
1037 if (print_thread_events)
1038 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1040 delete_thread (lp->ptid);
1043 delete_lwp (lp->ptid);
1046 /* Wait for the LWP specified by LP, which we have just attached to.
1047 Returns a wait status for that LWP, to cache. */
1050 linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
1052 pid_t new_pid, pid = ptid_get_lwp (ptid);
1055 if (linux_proc_pid_is_stopped (pid))
1057 if (debug_linux_nat)
1058 fprintf_unfiltered (gdb_stdlog,
1059 "LNPAW: Attaching to a stopped process\n");
1061 /* The process is definitely stopped. It is in a job control
1062 stop, unless the kernel predates the TASK_STOPPED /
1063 TASK_TRACED distinction, in which case it might be in a
1064 ptrace stop. Make sure it is in a ptrace stop; from there we
1065 can kill it, signal it, et cetera.
1067 First make sure there is a pending SIGSTOP. Since we are
1068 already attached, the process can not transition from stopped
1069 to running without a PTRACE_CONT; so we know this signal will
1070 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1071 probably already in the queue (unless this kernel is old
1072 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1073 is not an RT signal, it can only be queued once. */
1074 kill_lwp (pid, SIGSTOP);
1076 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1077 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1078 ptrace (PTRACE_CONT, pid, 0, 0);
1081 /* Make sure the initial process is stopped. The user-level threads
1082 layer might want to poke around in the inferior, and that won't
1083 work if things haven't stabilized yet. */
1084 new_pid = my_waitpid (pid, &status, __WALL);
1085 gdb_assert (pid == new_pid);
1087 if (!WIFSTOPPED (status))
1089 /* The pid we tried to attach has apparently just exited. */
1090 if (debug_linux_nat)
1091 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1092 pid, status_to_str (status));
1096 if (WSTOPSIG (status) != SIGSTOP)
1099 if (debug_linux_nat)
1100 fprintf_unfiltered (gdb_stdlog,
1101 "LNPAW: Received %s after attaching\n",
1102 status_to_str (status));
1109 linux_nat_target::create_inferior (const char *exec_file,
1110 const std::string &allargs,
1111 char **env, int from_tty)
1113 maybe_disable_address_space_randomization restore_personality
1114 (disable_randomization);
1116 /* The fork_child mechanism is synchronous and calls target_wait, so
1117 we have to mask the async mode. */
1119 /* Make sure we report all signals during startup. */
1120 pass_signals (0, NULL);
1122 inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);
1125 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1126 already attached. Returns true if a new LWP is found, false
1130 attach_proc_task_lwp_callback (ptid_t ptid)
1132 struct lwp_info *lp;
1134 /* Ignore LWPs we're already attached to. */
1135 lp = find_lwp_pid (ptid);
1138 int lwpid = ptid_get_lwp (ptid);
1140 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1144 /* Be quiet if we simply raced with the thread exiting.
1145 EPERM is returned if the thread's task still exists, and
1146 is marked as exited or zombie, as well as other
1147 conditions, so in that case, confirm the status in
1148 /proc/PID/status. */
1150 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1152 if (debug_linux_nat)
1154 fprintf_unfiltered (gdb_stdlog,
1155 "Cannot attach to lwp %d: "
1156 "thread is gone (%d: %s)\n",
1157 lwpid, err, safe_strerror (err));
1163 = linux_ptrace_attach_fail_reason_string (ptid, err);
1165 warning (_("Cannot attach to lwp %d: %s"),
1166 lwpid, reason.c_str ());
1171 if (debug_linux_nat)
1172 fprintf_unfiltered (gdb_stdlog,
1173 "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1174 target_pid_to_str (ptid));
1176 lp = add_lwp (ptid);
1178 /* The next time we wait for this LWP we'll see a SIGSTOP as
1179 PTRACE_ATTACH brings it to a halt. */
1182 /* We need to wait for a stop before being able to make the
1183 next ptrace call on this LWP. */
1184 lp->must_set_ptrace_flags = 1;
1186 /* So that wait collects the SIGSTOP. */
1189 /* Also add the LWP to gdb's thread list, in case a
1190 matching libthread_db is not found (or the process uses
1192 add_thread (lp->ptid);
1193 set_running (lp->ptid, 1);
1194 set_executing (lp->ptid, 1);
1203 linux_nat_target::attach (const char *args, int from_tty)
1205 struct lwp_info *lp;
1209 /* Make sure we report all signals during attach. */
1210 pass_signals (0, NULL);
1214 inf_ptrace_target::attach (args, from_tty);
1216 CATCH (ex, RETURN_MASK_ERROR)
1218 pid_t pid = parse_pid_to_attach (args);
1219 std::string reason = linux_ptrace_attach_fail_reason (pid);
1221 if (!reason.empty ())
1222 throw_error (ex.error, "warning: %s\n%s", reason.c_str (), ex.message);
1224 throw_error (ex.error, "%s", ex.message);
1228 /* The ptrace base target adds the main thread with (pid,0,0)
1229 format. Decorate it with lwp info. */
1230 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1231 ptid_get_pid (inferior_ptid),
1233 thread_change_ptid (inferior_ptid, ptid);
1235 /* Add the initial process as the first LWP to the list. */
1236 lp = add_initial_lwp (ptid);
1238 status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
1239 if (!WIFSTOPPED (status))
1241 if (WIFEXITED (status))
1243 int exit_code = WEXITSTATUS (status);
1245 target_terminal::ours ();
1246 target_mourn_inferior (inferior_ptid);
1248 error (_("Unable to attach: program exited normally."));
1250 error (_("Unable to attach: program exited with code %d."),
1253 else if (WIFSIGNALED (status))
1255 enum gdb_signal signo;
1257 target_terminal::ours ();
1258 target_mourn_inferior (inferior_ptid);
1260 signo = gdb_signal_from_host (WTERMSIG (status));
1261 error (_("Unable to attach: program terminated with signal "
1263 gdb_signal_to_name (signo),
1264 gdb_signal_to_string (signo));
1267 internal_error (__FILE__, __LINE__,
1268 _("unexpected status %d for PID %ld"),
1269 status, (long) ptid_get_lwp (ptid));
1274 /* Save the wait status to report later. */
1276 if (debug_linux_nat)
1277 fprintf_unfiltered (gdb_stdlog,
1278 "LNA: waitpid %ld, saving status %s\n",
1279 (long) ptid_get_pid (lp->ptid), status_to_str (status));
1281 lp->status = status;
1283 /* We must attach to every LWP. If /proc is mounted, use that to
1284 find them now. The inferior may be using raw clone instead of
1285 using pthreads. But even if it is using pthreads, thread_db
1286 walks structures in the inferior's address space to find the list
1287 of threads/LWPs, and those structures may well be corrupted.
1288 Note that once thread_db is loaded, we'll still use it to list
1289 threads and associate pthread info with each LWP. */
1290 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1291 attach_proc_task_lwp_callback);
1293 if (target_can_async_p ())
1297 /* Get pending signal of THREAD as a host signal number, for detaching
1298 purposes. This is the signal the thread last stopped for, which we
1299 need to deliver to the thread when detaching, otherwise, it'd be
1303 get_detach_signal (struct lwp_info *lp)
1305 enum gdb_signal signo = GDB_SIGNAL_0;
1307 /* If we paused threads momentarily, we may have stored pending
1308 events in lp->status or lp->waitstatus (see stop_wait_callback),
1309 and GDB core hasn't seen any signal for those threads.
1310 Otherwise, the last signal reported to the core is found in the
1311 thread object's stop_signal.
1313 There's a corner case that isn't handled here at present. Only
1314 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1315 stop_signal make sense as a real signal to pass to the inferior.
1316 Some catchpoint related events, like
1317 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1318 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1319 those traps are debug API (ptrace in our case) related and
1320 induced; the inferior wouldn't see them if it wasn't being
1321 traced. Hence, we should never pass them to the inferior, even
1322 when set to pass state. Since this corner case isn't handled by
1323 infrun.c when proceeding with a signal, for consistency, neither
1324 do we handle it here (or elsewhere in the file we check for
1325 signal pass state). Normally SIGTRAP isn't set to pass state, so
1326 this is really a corner case. */
1328 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1329 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1330 else if (lp->status)
1331 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1332 else if (target_is_non_stop_p () && !is_executing (lp->ptid))
1334 struct thread_info *tp = find_thread_ptid (lp->ptid);
1336 if (tp->suspend.waitstatus_pending_p)
1337 signo = tp->suspend.waitstatus.value.sig;
1339 signo = tp->suspend.stop_signal;
1341 else if (!target_is_non_stop_p ())
1343 struct target_waitstatus last;
1346 get_last_target_status (&last_ptid, &last);
1348 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
1350 struct thread_info *tp = find_thread_ptid (lp->ptid);
1352 signo = tp->suspend.stop_signal;
1356 if (signo == GDB_SIGNAL_0)
1358 if (debug_linux_nat)
1359 fprintf_unfiltered (gdb_stdlog,
1360 "GPT: lwp %s has no pending signal\n",
1361 target_pid_to_str (lp->ptid));
1363 else if (!signal_pass_state (signo))
1365 if (debug_linux_nat)
1366 fprintf_unfiltered (gdb_stdlog,
1367 "GPT: lwp %s had signal %s, "
1368 "but it is in no pass state\n",
1369 target_pid_to_str (lp->ptid),
1370 gdb_signal_to_string (signo));
1374 if (debug_linux_nat)
1375 fprintf_unfiltered (gdb_stdlog,
1376 "GPT: lwp %s has pending signal %s\n",
1377 target_pid_to_str (lp->ptid),
1378 gdb_signal_to_string (signo));
1380 return gdb_signal_to_host (signo);
1386 /* Detach from LP. If SIGNO_P is non-NULL, then it points to the
1387 signal number that should be passed to the LWP when detaching.
1388 Otherwise pass any pending signal the LWP may have, if any. */
1391 detach_one_lwp (struct lwp_info *lp, int *signo_p)
1393 int lwpid = ptid_get_lwp (lp->ptid);
1396 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1398 if (debug_linux_nat && lp->status)
1399 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1400 strsignal (WSTOPSIG (lp->status)),
1401 target_pid_to_str (lp->ptid));
1403 /* If there is a pending SIGSTOP, get rid of it. */
1406 if (debug_linux_nat)
1407 fprintf_unfiltered (gdb_stdlog,
1408 "DC: Sending SIGCONT to %s\n",
1409 target_pid_to_str (lp->ptid));
1411 kill_lwp (lwpid, SIGCONT);
1415 if (signo_p == NULL)
1417 /* Pass on any pending signal for this LWP. */
1418 signo = get_detach_signal (lp);
1423 /* Preparing to resume may try to write registers, and fail if the
1424 lwp is zombie. If that happens, ignore the error. We'll handle
1425 it below, when detach fails with ESRCH. */
1428 if (linux_nat_prepare_to_resume != NULL)
1429 linux_nat_prepare_to_resume (lp);
1431 CATCH (ex, RETURN_MASK_ERROR)
1433 if (!check_ptrace_stopped_lwp_gone (lp))
1434 throw_exception (ex);
1438 if (ptrace (PTRACE_DETACH, lwpid, 0, signo) < 0)
1440 int save_errno = errno;
1442 /* We know the thread exists, so ESRCH must mean the lwp is
1443 zombie. This can happen if one of the already-detached
1444 threads exits the whole thread group. In that case we're
1445 still attached, and must reap the lwp. */
1446 if (save_errno == ESRCH)
1450 ret = my_waitpid (lwpid, &status, __WALL);
1453 warning (_("Couldn't reap LWP %d while detaching: %s"),
1454 lwpid, strerror (errno));
1456 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1458 warning (_("Reaping LWP %d while detaching "
1459 "returned unexpected status 0x%x"),
1465 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1466 safe_strerror (save_errno));
1469 else if (debug_linux_nat)
1471 fprintf_unfiltered (gdb_stdlog,
1472 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1473 target_pid_to_str (lp->ptid),
1477 delete_lwp (lp->ptid);
1481 detach_callback (struct lwp_info *lp, void *data)
1483 /* We don't actually detach from the thread group leader just yet.
1484 If the thread group exits, we must reap the zombie clone lwps
1485 before we're able to reap the leader. */
1486 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
1487 detach_one_lwp (lp, NULL);
1492 linux_nat_target::detach (inferior *inf, int from_tty)
1494 struct lwp_info *main_lwp;
1497 /* Don't unregister from the event loop, as there may be other
1498 inferiors running. */
1500 /* Stop all threads before detaching. ptrace requires that the
1501 thread is stopped to successfully detach. */
1502 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1503 /* ... and wait until all of them have reported back that
1504 they're no longer running. */
1505 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1507 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1509 /* Only the initial process should be left right now. */
1510 gdb_assert (num_lwps (pid) == 1);
1512 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1514 if (forks_exist_p ())
1516 /* Multi-fork case. The current inferior_ptid is being detached
1517 from, but there are other viable forks to debug. Detach from
1518 the current fork, and context-switch to the first
1520 linux_fork_detach (from_tty);
1524 target_announce_detach (from_tty);
1526 /* Pass on any pending signal for the last LWP. */
1527 int signo = get_detach_signal (main_lwp);
1529 detach_one_lwp (main_lwp, &signo);
1531 detach_success (inf);
1535 /* Resume execution of the inferior process. If STEP is nonzero,
1536 single-step it. If SIGNAL is nonzero, give it that signal. */
1539 linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1540 enum gdb_signal signo)
1544 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1545 We only presently need that if the LWP is stepped, though (to
1546 handle the case of stepping a breakpoint instruction). */
1549 struct regcache *regcache = get_thread_regcache (lp->ptid);
1551 lp->stop_pc = regcache_read_pc (regcache);
1556 if (linux_nat_prepare_to_resume != NULL)
1557 linux_nat_prepare_to_resume (lp);
1558 linux_target->low_resume (lp->ptid, step, signo);
1560 /* Successfully resumed. Clear state that no longer makes sense,
1561 and mark the LWP as running. Must not do this before resuming
1562 otherwise if that fails other code will be confused. E.g., we'd
1563 later try to stop the LWP and hang forever waiting for a stop
1564 status. Note that we must not throw after this is cleared,
1565 otherwise handle_zombie_lwp_error would get confused. */
1568 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1569 registers_changed_ptid (lp->ptid);
1572 /* Called when we try to resume a stopped LWP and that errors out. If
1573 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1574 or about to become), discard the error, clear any pending status
1575 the LWP may have, and return true (we'll collect the exit status
1576 soon enough). Otherwise, return false. */
1579 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1581 /* If we get an error after resuming the LWP successfully, we'd
1582 confuse !T state for the LWP being gone. */
1583 gdb_assert (lp->stopped);
1585 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1586 because even if ptrace failed with ESRCH, the tracee may be "not
1587 yet fully dead", but already refusing ptrace requests. In that
1588 case the tracee has 'R (Running)' state for a little bit
1589 (observed in Linux 3.18). See also the note on ESRCH in the
1590 ptrace(2) man page. Instead, check whether the LWP has any state
1591 other than ptrace-stopped. */
1593 /* Don't assume anything if /proc/PID/status can't be read. */
1594 if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
1596 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1598 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1604 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1605 disappears while we try to resume it. */
1608 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1612 linux_resume_one_lwp_throw (lp, step, signo);
1614 CATCH (ex, RETURN_MASK_ERROR)
1616 if (!check_ptrace_stopped_lwp_gone (lp))
1617 throw_exception (ex);
1625 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1629 struct inferior *inf = find_inferior_ptid (lp->ptid);
1631 if (inf->vfork_child != NULL)
1633 if (debug_linux_nat)
1634 fprintf_unfiltered (gdb_stdlog,
1635 "RC: Not resuming %s (vfork parent)\n",
1636 target_pid_to_str (lp->ptid));
1638 else if (!lwp_status_pending_p (lp))
1640 if (debug_linux_nat)
1641 fprintf_unfiltered (gdb_stdlog,
1642 "RC: Resuming sibling %s, %s, %s\n",
1643 target_pid_to_str (lp->ptid),
1644 (signo != GDB_SIGNAL_0
1645 ? strsignal (gdb_signal_to_host (signo))
1647 step ? "step" : "resume");
1649 linux_resume_one_lwp (lp, step, signo);
1653 if (debug_linux_nat)
1654 fprintf_unfiltered (gdb_stdlog,
1655 "RC: Not resuming sibling %s (has pending)\n",
1656 target_pid_to_str (lp->ptid));
1661 if (debug_linux_nat)
1662 fprintf_unfiltered (gdb_stdlog,
1663 "RC: Not resuming sibling %s (not stopped)\n",
1664 target_pid_to_str (lp->ptid));
1668 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1669 Resume LWP with the last stop signal, if it is in pass state. */
1672 linux_nat_resume_callback (struct lwp_info *lp, void *except)
1674 enum gdb_signal signo = GDB_SIGNAL_0;
1681 struct thread_info *thread;
1683 thread = find_thread_ptid (lp->ptid);
1686 signo = thread->suspend.stop_signal;
1687 thread->suspend.stop_signal = GDB_SIGNAL_0;
1691 resume_lwp (lp, 0, signo);
1696 resume_clear_callback (struct lwp_info *lp, void *data)
1699 lp->last_resume_kind = resume_stop;
1704 resume_set_callback (struct lwp_info *lp, void *data)
1707 lp->last_resume_kind = resume_continue;
1712 linux_nat_target::resume (ptid_t ptid, int step, enum gdb_signal signo)
1714 struct lwp_info *lp;
1717 if (debug_linux_nat)
1718 fprintf_unfiltered (gdb_stdlog,
1719 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1720 step ? "step" : "resume",
1721 target_pid_to_str (ptid),
1722 (signo != GDB_SIGNAL_0
1723 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1724 target_pid_to_str (inferior_ptid));
1726 /* A specific PTID means `step only this process id'. */
1727 resume_many = (ptid_equal (minus_one_ptid, ptid)
1728 || ptid_is_pid (ptid));
1730 /* Mark the lwps we're resuming as resumed. */
1731 iterate_over_lwps (ptid, resume_set_callback, NULL);
1733 /* See if it's the current inferior that should be handled
1736 lp = find_lwp_pid (inferior_ptid);
1738 lp = find_lwp_pid (ptid);
1739 gdb_assert (lp != NULL);
1741 /* Remember if we're stepping. */
1742 lp->last_resume_kind = step ? resume_step : resume_continue;
1744 /* If we have a pending wait status for this thread, there is no
1745 point in resuming the process. But first make sure that
1746 linux_nat_wait won't preemptively handle the event - we
1747 should never take this short-circuit if we are going to
1748 leave LP running, since we have skipped resuming all the
1749 other threads. This bit of code needs to be synchronized
1750 with linux_nat_wait. */
1752 if (lp->status && WIFSTOPPED (lp->status))
1755 && WSTOPSIG (lp->status)
1756 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1758 if (debug_linux_nat)
1759 fprintf_unfiltered (gdb_stdlog,
1760 "LLR: Not short circuiting for ignored "
1761 "status 0x%x\n", lp->status);
1763 /* FIXME: What should we do if we are supposed to continue
1764 this thread with a signal? */
1765 gdb_assert (signo == GDB_SIGNAL_0);
1766 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1771 if (lwp_status_pending_p (lp))
1773 /* FIXME: What should we do if we are supposed to continue
1774 this thread with a signal? */
1775 gdb_assert (signo == GDB_SIGNAL_0);
1777 if (debug_linux_nat)
1778 fprintf_unfiltered (gdb_stdlog,
1779 "LLR: Short circuiting for status 0x%x\n",
1782 if (target_can_async_p ())
1785 /* Tell the event loop we have something to process. */
1792 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
1794 if (debug_linux_nat)
1795 fprintf_unfiltered (gdb_stdlog,
1796 "LLR: %s %s, %s (resume event thread)\n",
1797 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1798 target_pid_to_str (lp->ptid),
1799 (signo != GDB_SIGNAL_0
1800 ? strsignal (gdb_signal_to_host (signo)) : "0"));
1802 linux_resume_one_lwp (lp, step, signo);
1804 if (target_can_async_p ())
1808 /* Send a signal to an LWP. */
1811 kill_lwp (int lwpid, int signo)
1816 ret = syscall (__NR_tkill, lwpid, signo);
1817 if (errno == ENOSYS)
1819 /* If tkill fails, then we are not using nptl threads, a
1820 configuration we no longer support. */
1821 perror_with_name (("tkill"));
1826 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1827 event, check if the core is interested in it: if not, ignore the
1828 event, and keep waiting; otherwise, we need to toggle the LWP's
1829 syscall entry/exit status, since the ptrace event itself doesn't
1830 indicate it, and report the trap to higher layers. */
1833 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1835 struct target_waitstatus *ourstatus = &lp->waitstatus;
1836 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1837 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1841 /* If we're stopping threads, there's a SIGSTOP pending, which
1842 makes it so that the LWP reports an immediate syscall return,
1843 followed by the SIGSTOP. Skip seeing that "return" using
1844 PTRACE_CONT directly, and let stop_wait_callback collect the
1845 SIGSTOP. Later when the thread is resumed, a new syscall
1846 entry event is reported. If we didn't do this (and returned 0), we'd
1847 leave a syscall entry pending, and our caller, by using
1848 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1849 itself. Later, when the user re-resumes this LWP, we'd see
1850 another syscall entry event and we'd mistake it for a return.
1852 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1853 (leaving immediately with LWP->signalled set, without issuing
1854 a PTRACE_CONT), it would still be problematic to leave this
1855 syscall enter pending, as later when the thread is resumed,
1856 it would then see the same syscall exit mentioned above,
1857 followed by the delayed SIGSTOP, while the syscall didn't
1858 actually get to execute. It seems it would be even more
1859 confusing to the user. */
1861 if (debug_linux_nat)
1862 fprintf_unfiltered (gdb_stdlog,
1863 "LHST: ignoring syscall %d "
1864 "for LWP %ld (stopping threads), "
1865 "resuming with PTRACE_CONT for SIGSTOP\n",
1867 ptid_get_lwp (lp->ptid));
1869 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1870 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
1875 /* Always update the entry/return state, even if this particular
1876 syscall isn't interesting to the core now. In async mode,
1877 the user could install a new catchpoint for this syscall
1878 between syscall enter/return, and we'll need to know to
1879 report a syscall return if that happens. */
1880 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1881 ? TARGET_WAITKIND_SYSCALL_RETURN
1882 : TARGET_WAITKIND_SYSCALL_ENTRY);
1884 if (catch_syscall_enabled ())
1886 if (catching_syscall_number (syscall_number))
1888 /* Alright, an event to report. */
1889 ourstatus->kind = lp->syscall_state;
1890 ourstatus->value.syscall_number = syscall_number;
1892 if (debug_linux_nat)
1893 fprintf_unfiltered (gdb_stdlog,
1894 "LHST: stopping for %s of syscall %d"
1897 == TARGET_WAITKIND_SYSCALL_ENTRY
1898 ? "entry" : "return",
1900 ptid_get_lwp (lp->ptid));
1904 if (debug_linux_nat)
1905 fprintf_unfiltered (gdb_stdlog,
1906 "LHST: ignoring %s of syscall %d "
1908 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1909 ? "entry" : "return",
1911 ptid_get_lwp (lp->ptid));
1915 /* If we had been syscall tracing, and hence used PT_SYSCALL
1916 before on this LWP, it could happen that the user removes all
1917 syscall catchpoints before we get to process this event.
1918 There are two noteworthy issues here:
1920 - When stopped at a syscall entry event, resuming with
1921 PT_STEP still resumes executing the syscall and reports a
1924 - Only PT_SYSCALL catches syscall enters. If we last
1925 single-stepped this thread, then this event can't be a
1926 syscall enter; it has to be a syscall exit.
1929 The points above mean that the next resume, be it PT_STEP or
1930 PT_CONTINUE, can not trigger a syscall trace event. */
1931 if (debug_linux_nat)
1932 fprintf_unfiltered (gdb_stdlog,
1933 "LHST: caught syscall event "
1934 "with no syscall catchpoints."
1935 " %d for LWP %ld, ignoring\n",
1937 ptid_get_lwp (lp->ptid));
1938 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1941 /* The core isn't interested in this event. For efficiency, avoid
1942 stopping all threads only to have the core resume them all again.
1943 Since we're not stopping threads, if we're still syscall tracing
1944 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1945 subsequent syscall. Simply resume using the inf-ptrace layer,
1946 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1948 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
1952 /* Handle a GNU/Linux extended wait response. If we see a clone
1953 event, we need to add the new LWP to our list (and not report the
1954 trap to higher layers). This function returns non-zero if the
1955 event should be ignored and we should wait again. */
1959 linux_handle_extended_wait (struct lwp_info *lp, int status)
1961 int pid = ptid_get_lwp (lp->ptid);
1962 struct target_waitstatus *ourstatus = &lp->waitstatus;
1963 int event = linux_ptrace_get_extended_event (status);
1965 /* All extended events we currently use are mid-syscall. Only
1966 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1967 you have to be using PTRACE_SEIZE to get that. */
1968 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1970 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1971 || event == PTRACE_EVENT_CLONE)
1973 unsigned long new_pid;
1976 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1978 /* If we haven't already seen the new PID stop, wait for it now. */
1979 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1981 /* The new child has a pending SIGSTOP. We can't affect it until it
1982 hits the SIGSTOP, but we're already attached. */
1983 ret = my_waitpid (new_pid, &status, __WALL);
1985 perror_with_name (_("waiting for new child"));
1986 else if (ret != new_pid)
1987 internal_error (__FILE__, __LINE__,
1988 _("wait returned unexpected PID %d"), ret);
1989 else if (!WIFSTOPPED (status))
1990 internal_error (__FILE__, __LINE__,
1991 _("wait returned unexpected status 0x%x"), status);
1994 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1996 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1998 /* The arch-specific native code may need to know about new
1999 forks even if those end up never mapped to an
2001 if (linux_nat_new_fork != NULL)
2002 linux_nat_new_fork (lp, new_pid);
2005 if (event == PTRACE_EVENT_FORK
2006 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2008 /* Handle checkpointing by linux-fork.c here as a special
2009 case. We don't want the follow-fork-mode or 'catch fork'
2010 to interfere with this. */
2012 /* This won't actually modify the breakpoint list, but will
2013 physically remove the breakpoints from the child. */
2014 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2016 /* Retain child fork in ptrace (stopped) state. */
2017 if (!find_fork_pid (new_pid))
2020 /* Report as spurious, so that infrun doesn't want to follow
2021 this fork. We're actually doing an infcall in
2023 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2025 /* Report the stop to the core. */
2029 if (event == PTRACE_EVENT_FORK)
2030 ourstatus->kind = TARGET_WAITKIND_FORKED;
2031 else if (event == PTRACE_EVENT_VFORK)
2032 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2033 else if (event == PTRACE_EVENT_CLONE)
2035 struct lwp_info *new_lp;
2037 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2039 if (debug_linux_nat)
2040 fprintf_unfiltered (gdb_stdlog,
2041 "LHEW: Got clone event "
2042 "from LWP %d, new child is LWP %ld\n",
2045 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
2046 new_lp->stopped = 1;
2047 new_lp->resumed = 1;
2049 /* If the thread_db layer is active, let it record the user
2050 level thread id and status, and add the thread to GDB's
2052 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
2054 /* The process is not using thread_db. Add the LWP to
2056 target_post_attach (ptid_get_lwp (new_lp->ptid));
2057 add_thread (new_lp->ptid);
2060 /* Even if we're stopping the thread for some reason
2061 internal to this module, from the perspective of infrun
2062 and the user/frontend, this new thread is running until
2063 it next reports a stop. */
2064 set_running (new_lp->ptid, 1);
2065 set_executing (new_lp->ptid, 1);
2067 if (WSTOPSIG (status) != SIGSTOP)
2069 /* This can happen if someone starts sending signals with
2070 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
2071 thread before it gets a chance to run.
2072 This is an unlikely case, and harder to handle for
2073 fork / vfork than for clone, so we do not try - but
2074 we handle it for clone events here. */
2076 new_lp->signalled = 1;
2078 /* We created NEW_LP so it cannot yet contain STATUS. */
2079 gdb_assert (new_lp->status == 0);
2081 /* Save the wait status to report later. */
2082 if (debug_linux_nat)
2083 fprintf_unfiltered (gdb_stdlog,
2084 "LHEW: waitpid of new LWP %ld, "
2085 "saving status %s\n",
2086 (long) ptid_get_lwp (new_lp->ptid),
2087 status_to_str (status));
2088 new_lp->status = status;
2090 else if (report_thread_events)
2092 new_lp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
2093 new_lp->status = status;
2102 if (event == PTRACE_EVENT_EXEC)
2104 if (debug_linux_nat)
2105 fprintf_unfiltered (gdb_stdlog,
2106 "LHEW: Got exec event from LWP %ld\n",
2107 ptid_get_lwp (lp->ptid));
2109 ourstatus->kind = TARGET_WAITKIND_EXECD;
2110 ourstatus->value.execd_pathname
2111 = xstrdup (linux_proc_pid_to_exec_file (pid));
2113 /* The thread that execed must have been resumed, but, when a
2114 thread execs, it changes its tid to the tgid, and the old
2115 tgid thread might not have been resumed. */
2120 if (event == PTRACE_EVENT_VFORK_DONE)
2122 if (current_inferior ()->waiting_for_vfork_done)
2124 if (debug_linux_nat)
2125 fprintf_unfiltered (gdb_stdlog,
2126 "LHEW: Got expected PTRACE_EVENT_"
2127 "VFORK_DONE from LWP %ld: stopping\n",
2128 ptid_get_lwp (lp->ptid));
2130 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2134 if (debug_linux_nat)
2135 fprintf_unfiltered (gdb_stdlog,
2136 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2137 "from LWP %ld: ignoring\n",
2138 ptid_get_lwp (lp->ptid));
2142 internal_error (__FILE__, __LINE__,
2143 _("unknown ptrace event %d"), event);
2146 /* Suspend waiting for a signal. We're mostly interested in SIGCHLD, of course. */
2152 if (debug_linux_nat)
2153 fprintf_unfiltered (gdb_stdlog, "linux-nat: about to sigsuspend\n");
2154 sigsuspend (&suspend_mask);
2156 /* If the quit flag is set, it means that the user pressed Ctrl-C
2157 and we're debugging a process that is running on a separate
2158 terminal, so we must forward the Ctrl-C to the inferior. (If the
2159 inferior is sharing GDB's terminal, then the Ctrl-C reaches the
2160 inferior directly.) We must do this here because functions that
2161 need to block waiting for a signal loop forever until there's an
2162 event to report before returning back to the event loop. */
2163 if (!target_terminal::is_ours ())
2165 if (check_quit_flag ())
2166 target_pass_ctrlc ();
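/* For illustration only; not part of GDB.  A minimal standalone sketch of
   the pattern used above and in linux_nat_wait_1: block SIGCHLD, poll with
   waitpid (..., WNOHANG), and sleep in sigsuspend with the original mask so
   a SIGCHLD delivered in between is not lost.  Names such as
   wait_for_child_event and child_handler are hypothetical.  */
#if 0
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static volatile sig_atomic_t got_sigchld;

static void
child_handler (int sig)
{
  got_sigchld = 1;
}

static int
wait_for_child_event (void)
{
  sigset_t block_mask, prev_mask;
  int status = 0;
  pid_t pid;

  signal (SIGCHLD, child_handler);

  /* Block SIGCHLD so it cannot slip in between waitpid and sigsuspend.  */
  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while ((pid = waitpid (-1, &status, WNOHANG)) == 0)
    {
      /* Nothing reported yet: atomically unblock SIGCHLD and wait.  If a
	 SIGCHLD is already pending, sigsuspend returns immediately.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return pid > 0 ? status : -1;
}
#endif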
2170 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has exited. */
2174 wait_lwp (struct lwp_info *lp)
2178 int thread_dead = 0;
2181 gdb_assert (!lp->stopped);
2182 gdb_assert (lp->status == 0);
2184 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2185 block_child_signals (&prev_mask);
2189 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WALL | WNOHANG);
2190 if (pid == -1 && errno == ECHILD)
2192 /* The thread has previously exited. We need to delete it
2193 now because if this was a non-leader thread execing, we
2194 won't get an exit event. See comments on exec events at
2195 the top of the file. */
2197 if (debug_linux_nat)
2198 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2199 target_pid_to_str (lp->ptid));
2204 /* Bugs 10970, 12702.
2205 Thread group leader may have exited in which case we'll lock up in
2206 waitpid if there are other threads, even if they are all zombies too.
2207 Basically, we're not supposed to use waitpid this way.
2208 tkill(pid,0) cannot be used here as it gets ESRCH for both
2209 zombie and running processes.
2211 As a workaround, check if we're waiting for the thread group leader and
2212 if it's a zombie, and avoid calling waitpid if it is.
2214 This is racy: what if the tgl becomes a zombie right after we check?
2215 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
2216 blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
2218 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2219 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
2222 if (debug_linux_nat)
2223 fprintf_unfiltered (gdb_stdlog,
2224 "WL: Thread group leader %s vanished.\n",
2225 target_pid_to_str (lp->ptid));
2229 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD handlers
2230 get invoked even though our caller had them intentionally blocked by
2231 block_child_signals. This only matters for the loop in
2232 linux_nat_wait_1: if we get called from there, my_waitpid gets called
2233 again before it reaches sigsuspend, so we can safely let the handlers
2234 run here. */
2238 restore_child_signals_mask (&prev_mask);
2242 gdb_assert (pid == ptid_get_lwp (lp->ptid));
2244 if (debug_linux_nat)
2246 fprintf_unfiltered (gdb_stdlog,
2247 "WL: waitpid %s received %s\n",
2248 target_pid_to_str (lp->ptid),
2249 status_to_str (status));
2252 /* Check if the thread has exited. */
2253 if (WIFEXITED (status) || WIFSIGNALED (status))
2255 if (report_thread_events
2256 || ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2258 if (debug_linux_nat)
2259 fprintf_unfiltered (gdb_stdlog, "WL: LWP %d exited.\n",
2260 ptid_get_pid (lp->ptid));
2262 /* If this is the leader exiting, it means the whole
2263 process is gone. Store the status to report to the
2264 core. Store it in lp->waitstatus, because lp->status
2265 would be ambiguous (W_EXITCODE(0,0) == 0). */
2266 store_waitstatus (&lp->waitstatus, status);
2271 if (debug_linux_nat)
2272 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2273 target_pid_to_str (lp->ptid));
2283 gdb_assert (WIFSTOPPED (status));
2286 if (lp->must_set_ptrace_flags)
2288 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2289 int options = linux_nat_ptrace_options (inf->attach_flag);
2291 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
2292 lp->must_set_ptrace_flags = 0;
2295 /* Handle GNU/Linux's syscall SIGTRAPs. */
2296 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2298 /* No longer need the sysgood bit. The ptrace event ends up
2299 recorded in lp->waitstatus if we care for it. We can carry
2300 on handling the event like a regular SIGTRAP from here on. */
2302 status = W_STOPCODE (SIGTRAP);
2303 if (linux_handle_syscall_trap (lp, 1))
2304 return wait_lwp (lp);
2308 /* Almost all other ptrace-stops are known to be outside of system
2309 calls, with further exceptions in linux_handle_extended_wait. */
2310 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2313 /* Handle GNU/Linux's extended waitstatus for trace events. */
2314 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2315 && linux_is_extended_waitstatus (status))
2317 if (debug_linux_nat)
2318 fprintf_unfiltered (gdb_stdlog,
2319 "WL: Handling extended status 0x%06x\n",
2321 linux_handle_extended_wait (lp, status);
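/* For illustration only; not part of GDB.  A minimal sketch of how a check
   along the lines of linux_proc_pid_is_zombie (implemented in
   nat/linux-procfs.c) can be done: read the "State:" line of
   /proc/PID/status and look for the zombie state.  The function name
   pid_is_zombie_sketch is hypothetical.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
pid_is_zombie_sketch (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The state letter follows the tab, e.g. "State:\tZ (zombie)".  */
	zombie = (strstr (line, "Z (zombie)") != NULL);
	break;
      }

  fclose (f);
  return zombie;
}
#endif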
2328 /* Send a SIGSTOP to LP. */
2331 stop_callback (struct lwp_info *lp, void *data)
2333 if (!lp->stopped && !lp->signalled)
2337 if (debug_linux_nat)
2339 fprintf_unfiltered (gdb_stdlog,
2340 "SC: kill %s **<SIGSTOP>**\n",
2341 target_pid_to_str (lp->ptid));
2344 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2345 if (debug_linux_nat)
2347 fprintf_unfiltered (gdb_stdlog,
2348 "SC: lwp kill %d %s\n",
2350 errno ? safe_strerror (errno) : "ERRNO-OK");
2354 gdb_assert (lp->status == 0);
2360 /* Request a stop on LWP. */
2363 linux_stop_lwp (struct lwp_info *lwp)
2365 stop_callback (lwp, NULL);
2368 /* See linux-nat.h */
2371 linux_stop_and_wait_all_lwps (void)
2373 /* Stop all LWP's ... */
2374 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2376 /* ... and wait until all of them have reported back that
2377 they're no longer running. */
2378 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2381 /* See linux-nat.h */
2384 linux_unstop_all_lwps (void)
2386 iterate_over_lwps (minus_one_ptid,
2387 resume_stopped_resumed_lwps, &minus_one_ptid);
2390 /* Return non-zero if LWP PID has a pending SIGINT. */
2393 linux_nat_has_pending_sigint (int pid)
2395 sigset_t pending, blocked, ignored;
2397 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2399 if (sigismember (&pending, SIGINT)
2400 && !sigismember (&ignored, SIGINT))
2406 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2409 set_ignore_sigint (struct lwp_info *lp, void *data)
2411 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2412 flag to consume the next one. */
2413 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2414 && WSTOPSIG (lp->status) == SIGINT)
2415 lp->status = 0;
2416 else
2417 lp->ignore_sigint = 1;
2422 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2423 This function is called after we know the LWP has stopped; if the LWP
2424 stopped before the expected SIGINT was delivered, then it will never have
2425 arrived. Also, if the signal was delivered to a shared queue and consumed
2426 by a different thread, it will never be delivered to this LWP. */
2429 maybe_clear_ignore_sigint (struct lwp_info *lp)
2431 if (!lp->ignore_sigint)
2434 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2436 if (debug_linux_nat)
2437 fprintf_unfiltered (gdb_stdlog,
2438 "MCIS: Clearing bogus flag for %s\n",
2439 target_pid_to_str (lp->ptid));
2440 lp->ignore_sigint = 0;
2444 /* Fetch the possible triggered data watchpoint info and store it in LP.
2447 On some archs, like x86, that use debug registers to set
2448 watchpoints, the way to know which watched address trapped is to
2449 check the register that is used to select
2450 which address to watch. Problem is, between setting the watchpoint
2451 and reading back which data address trapped, the user may change
2452 the set of watchpoints, and, as a consequence, GDB changes the
2453 debug registers in the inferior. To avoid reading back a stale
2454 stopped-data-address when that happens, we cache in LP the fact
2455 that a watchpoint trapped, and the corresponding data address, as
2456 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2457 registers meanwhile, we have the cached data we can rely on. */
2460 check_stopped_by_watchpoint (struct lwp_info *lp)
2462 scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
2463 inferior_ptid = lp->ptid;
2465 if (linux_target->low_stopped_by_watchpoint ())
2467 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2468 lp->stopped_data_address_p
2469 = linux_target->low_stopped_data_address (&lp->stopped_data_address);
2472 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2475 /* Returns true if the LWP had stopped for a watchpoint. */
2478 linux_nat_target::stopped_by_watchpoint ()
2480 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2482 gdb_assert (lp != NULL);
2484 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2488 linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
2490 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2492 gdb_assert (lp != NULL);
2494 *addr_p = lp->stopped_data_address;
2496 return lp->stopped_data_address_p;
2499 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2502 sigtrap_is_event (int status)
2504 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2507 /* Set an alternative recognizer for SIGTRAP-like events. If
2508 breakpoint_inserted_here_p is true there, then gdbarch_decr_pc_after_break will be applied. */
2512 linux_nat_set_status_is_event (struct target_ops *t,
2513 int (*status_is_event) (int status))
2515 linux_nat_status_is_event = status_is_event;
2518 /* Wait until LP is stopped. */
2521 stop_wait_callback (struct lwp_info *lp, void *data)
2523 struct inferior *inf = find_inferior_ptid (lp->ptid);
2525 /* If this is a vfork parent, bail out; it is not going to report
2526 any SIGSTOP until the vfork is done. */
2527 if (inf->vfork_child != NULL)
2534 status = wait_lwp (lp);
2538 if (lp->ignore_sigint && WIFSTOPPED (status)
2539 && WSTOPSIG (status) == SIGINT)
2541 lp->ignore_sigint = 0;
2544 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2546 if (debug_linux_nat)
2547 fprintf_unfiltered (gdb_stdlog,
2548 "PTRACE_CONT %s, 0, 0 (%s) "
2549 "(discarding SIGINT)\n",
2550 target_pid_to_str (lp->ptid),
2551 errno ? safe_strerror (errno) : "OK");
2553 return stop_wait_callback (lp, NULL);
2556 maybe_clear_ignore_sigint (lp);
2558 if (WSTOPSIG (status) != SIGSTOP)
2560 /* The thread was stopped with a signal other than SIGSTOP. */
2562 if (debug_linux_nat)
2563 fprintf_unfiltered (gdb_stdlog,
2564 "SWC: Pending event %s in %s\n",
2565 status_to_str ((int) status),
2566 target_pid_to_str (lp->ptid));
2568 /* Save the sigtrap event. */
2569 lp->status = status;
2570 gdb_assert (lp->signalled);
2571 save_stop_reason (lp);
2575 /* We caught the SIGSTOP that we intended to catch, so
2576 there's no SIGSTOP pending. */
2578 if (debug_linux_nat)
2579 fprintf_unfiltered (gdb_stdlog,
2580 "SWC: Expected SIGSTOP caught for %s.\n",
2581 target_pid_to_str (lp->ptid));
2583 /* Reset SIGNALLED only after the stop_wait_callback call
2584 above as it does gdb_assert on SIGNALLED. */
2592 /* Return non-zero if LP has a wait status pending. Discard the
2593 pending event and resume the LWP if the event that originally
2594 caused the stop became uninteresting. */
2597 status_callback (struct lwp_info *lp, void *data)
2599 /* Only report a pending wait status if we pretend that this has
2600 indeed been resumed. */
2604 if (!lwp_status_pending_p (lp))
2607 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2608 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2610 struct regcache *regcache = get_thread_regcache (lp->ptid);
2614 pc = regcache_read_pc (regcache);
2616 if (pc != lp->stop_pc)
2618 if (debug_linux_nat)
2619 fprintf_unfiltered (gdb_stdlog,
2620 "SC: PC of %s changed. was=%s, now=%s\n",
2621 target_pid_to_str (lp->ptid),
2622 paddress (target_gdbarch (), lp->stop_pc),
2623 paddress (target_gdbarch (), pc));
2627 #if !USE_SIGTRAP_SIGINFO
2628 else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
2630 if (debug_linux_nat)
2631 fprintf_unfiltered (gdb_stdlog,
2632 "SC: previous breakpoint of %s, at %s gone\n",
2633 target_pid_to_str (lp->ptid),
2634 paddress (target_gdbarch (), lp->stop_pc));
2642 if (debug_linux_nat)
2643 fprintf_unfiltered (gdb_stdlog,
2644 "SC: pending event of %s cancelled.\n",
2645 target_pid_to_str (lp->ptid));
2648 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2656 /* Count the LWP's that have had events. */
2659 count_events_callback (struct lwp_info *lp, void *data)
2661 int *count = (int *) data;
2663 gdb_assert (count != NULL);
2665 /* Select only resumed LWPs that have an event pending. */
2666 if (lp->resumed && lwp_status_pending_p (lp))
2672 /* Select the LWP (if any) that is currently being single-stepped. */
2675 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2677 if (lp->last_resume_kind == resume_step
2684 /* Returns true if LP has a status pending. */
2687 lwp_status_pending_p (struct lwp_info *lp)
2689 /* We check for lp->waitstatus in addition to lp->status, because we
2690 can have pending process exits recorded in lp->status and
2691 W_EXITCODE(0,0) happens to be 0. */
2692 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
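/* For illustration only; not part of GDB.  A small worked example of the
   ambiguity mentioned above: W_EXITCODE (the glibc macro the comments refer
   to) encodes "exited with status 0, no signal" as the integer 0, which is
   the same value lp->status uses to mean "nothing pending", hence the
   separate lp->waitstatus field.  */
#if 0
#include <assert.h>
#include <sys/wait.h>

static void
exit_status_ambiguity (void)
{
  int status = W_EXITCODE (0, 0);	/* exit (0), no signal.  */

  assert (status == 0);			/* Indistinguishable from "none".  */
  assert (WIFEXITED (status) && WEXITSTATUS (status) == 0);
}
#endif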
2695 /* Select the Nth LWP that has had an event. */
2698 select_event_lwp_callback (struct lwp_info *lp, void *data)
2700 int *selector = (int *) data;
2702 gdb_assert (selector != NULL);
2704 /* Select only resumed LWPs that have an event pending. */
2705 if (lp->resumed && lwp_status_pending_p (lp))
2706 if ((*selector)-- == 0)
2712 /* Called when the LWP stopped for a signal/trap. If it stopped for a
2713 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2714 and save the result in the LWP's stop_reason field. If it stopped
2715 for a breakpoint, decrement the PC if necessary on the lwp's architecture. */
2719 save_stop_reason (struct lwp_info *lp)
2721 struct regcache *regcache;
2722 struct gdbarch *gdbarch;
2725 #if USE_SIGTRAP_SIGINFO
2729 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2730 gdb_assert (lp->status != 0);
2732 if (!linux_nat_status_is_event (lp->status))
2735 regcache = get_thread_regcache (lp->ptid);
2736 gdbarch = regcache->arch ();
2738 pc = regcache_read_pc (regcache);
2739 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
2741 #if USE_SIGTRAP_SIGINFO
2742 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2744 if (siginfo.si_signo == SIGTRAP)
2746 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2747 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
2749 /* The si_code is ambiguous on this arch -- check debug registers. */
2751 if (!check_stopped_by_watchpoint (lp))
2752 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2754 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2756 /* If we determine the LWP stopped for a SW breakpoint,
2757 trust it. Particularly don't check watchpoint
2758 registers, because at least on s390, we'd find
2759 stopped-by-watchpoint as long as there's a watchpoint set. */
2761 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2763 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
2765 /* This can indicate either a hardware breakpoint or
2766 hardware watchpoint. Check debug registers. */
2767 if (!check_stopped_by_watchpoint (lp))
2768 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2770 else if (siginfo.si_code == TRAP_TRACE)
2772 if (debug_linux_nat)
2773 fprintf_unfiltered (gdb_stdlog,
2774 "CSBB: %s stopped by trace\n",
2775 target_pid_to_str (lp->ptid));
2777 /* We may have single stepped an instruction that
2778 triggered a watchpoint. In that case, on some
2779 architectures (such as x86), instead of TRAP_HWBKPT,
2780 si_code indicates TRAP_TRACE, and we need to check
2781 the debug registers separately. */
2782 check_stopped_by_watchpoint (lp);
2787 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2788 && software_breakpoint_inserted_here_p (regcache->aspace (), sw_bp_pc))
2791 /* The LWP was either continued, or stepped a software
2792 breakpoint instruction. */
2793 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2796 if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
2797 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2799 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2800 check_stopped_by_watchpoint (lp);
2803 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2805 if (debug_linux_nat)
2806 fprintf_unfiltered (gdb_stdlog,
2807 "CSBB: %s stopped by software breakpoint\n",
2808 target_pid_to_str (lp->ptid));
2810 /* Back up the PC if necessary. */
2812 regcache_write_pc (regcache, sw_bp_pc);
2814 /* Update this so we record the correct stop PC below. */
2815 pc = sw_bp_pc;
2817 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2819 if (debug_linux_nat)
2820 fprintf_unfiltered (gdb_stdlog,
2821 "CSBB: %s stopped by hardware breakpoint\n",
2822 target_pid_to_str (lp->ptid));
2824 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2826 if (debug_linux_nat)
2827 fprintf_unfiltered (gdb_stdlog,
2828 "CSBB: %s stopped by hardware watchpoint\n",
2829 target_pid_to_str (lp->ptid));
2836 /* Returns true if the LWP had stopped for a software breakpoint. */
2839 linux_nat_target::stopped_by_sw_breakpoint ()
2841 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2843 gdb_assert (lp != NULL);
2845 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2848 /* Implement the supports_stopped_by_sw_breakpoint method. */
2851 linux_nat_target::supports_stopped_by_sw_breakpoint ()
2853 return USE_SIGTRAP_SIGINFO;
2856 /* Returns true if the LWP had stopped for a hardware
2857 breakpoint/watchpoint. */
2860 linux_nat_target::stopped_by_hw_breakpoint ()
2862 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2864 gdb_assert (lp != NULL);
2866 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2869 /* Implement the supports_stopped_by_hw_breakpoint method. */
2872 linux_nat_target::supports_stopped_by_hw_breakpoint ()
2874 return USE_SIGTRAP_SIGINFO;
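/* For illustration only; not part of GDB.  A sketch of the siginfo-based
   classification that save_stop_reason above relies on (via
   linux_nat_get_siginfo and the arch-specific GDB_ARCH_IS_TRAP_* macros):
   fetch the stopped thread's siginfo with PTRACE_GETSIGINFO and inspect
   si_code.  The TRAP_* constants come from <signal.h>; LWPID is assumed to
   be a thread already stopped under ptrace.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static const char *
classify_trap_sketch (pid_t lwpid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, lwpid, 0, &si) != 0)
    return "unknown";
  if (si.si_signo != SIGTRAP)
    return "not a trap";

  switch (si.si_code)
    {
    case TRAP_BRKPT:
      return "software breakpoint";
    case TRAP_HWBKPT:
      return "hardware breakpoint/watchpoint";
    case TRAP_TRACE:
      return "single-step";
    default:
      return "other SIGTRAP";
    }
}
#endif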
2877 /* Select one LWP out of those that have events pending. */
2880 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2883 int random_selector;
2884 struct lwp_info *event_lp = NULL;
2886 /* Record the wait status for the original LWP. */
2887 (*orig_lp)->status = *status;
2889 /* In all-stop, give preference to the LWP that is being
2890 single-stepped. There will be at most one, and it will be the
2891 LWP that the core is most interested in. If we didn't do this,
2892 then we'd have to handle pending step SIGTRAPs somehow in case
2893 the core later continues the previously-stepped thread, as
2894 otherwise we'd report the pending SIGTRAP then, and the core, not
2895 having stepped the thread, wouldn't understand what the trap was
2896 for, and therefore would report it to the user as a random signal. */
2898 if (!target_is_non_stop_p ())
2900 event_lp = iterate_over_lwps (filter,
2901 select_singlestep_lwp_callback, NULL);
2902 if (event_lp != NULL)
2904 if (debug_linux_nat)
2905 fprintf_unfiltered (gdb_stdlog,
2906 "SEL: Select single-step %s\n",
2907 target_pid_to_str (event_lp->ptid));
2911 if (event_lp == NULL)
2913 /* Pick one at random, out of those which have had events. */
2915 /* First see how many events we have. */
2916 iterate_over_lwps (filter, count_events_callback, &num_events);
2917 gdb_assert (num_events > 0);
2919 /* Now randomly pick a LWP out of those that have had events. */
2921 random_selector = (int)
2922 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2924 if (debug_linux_nat && num_events > 1)
2925 fprintf_unfiltered (gdb_stdlog,
2926 "SEL: Found %d events, selecting #%d\n",
2927 num_events, random_selector);
2929 event_lp = iterate_over_lwps (filter,
2930 select_event_lwp_callback, &random_selector);
2934 if (event_lp != NULL)
2936 /* Switch the event LWP. */
2937 *orig_lp = event_lp;
2938 *status = event_lp->status;
2941 /* Flush the wait status for the event LWP. */
2942 (*orig_lp)->status = 0;
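/* For illustration only; not part of GDB.  A worked example of the
   random_selector computation above: scaling rand () by
   num_events / (RAND_MAX + 1.0) yields an index in [0, num_events), so each
   LWP with a pending event has (roughly) the same chance of being picked.  */
#if 0
#include <assert.h>
#include <stdlib.h>

static int
pick_event_index (int num_events)
{
  int selector;

  assert (num_events > 0);
  selector = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
  assert (selector >= 0 && selector < num_events);
  return selector;
}
#endif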
2945 /* Return non-zero if LP has been resumed. */
2948 resumed_callback (struct lwp_info *lp, void *data)
2953 /* Check if we should go on and pass this event to common code.
2954 Return the affected lwp if we are, or NULL otherwise. */
2956 static struct lwp_info *
2957 linux_nat_filter_event (int lwpid, int status)
2959 struct lwp_info *lp;
2960 int event = linux_ptrace_get_extended_event (status);
2962 lp = find_lwp_pid (pid_to_ptid (lwpid));
2964 /* Check for stop events reported by a process we didn't already
2965 know about - anything not already in our LWP list.
2967 If we're expecting to receive stopped processes after
2968 fork, vfork, and clone events, then we'll just add the
2969 new one to our list and go back to waiting for the event
2970 to be reported - the stopped process might be returned
2971 from waitpid before or after the event is.
2973 But note the case of a non-leader thread exec'ing after the
2974 leader having exited, and gone from our lists. The non-leader
2975 thread changes its tid to the tgid. */
2977 if (WIFSTOPPED (status) && lp == NULL
2978 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
2980 /* A multi-thread exec after we had seen the leader exiting. */
2981 if (debug_linux_nat)
2982 fprintf_unfiltered (gdb_stdlog,
2983 "LLW: Re-adding thread group leader LWP %d.\n",
2986 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
2989 add_thread (lp->ptid);
2992 if (WIFSTOPPED (status) && !lp)
2994 if (debug_linux_nat)
2995 fprintf_unfiltered (gdb_stdlog,
2996 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
2997 (long) lwpid, status_to_str (status));
2998 add_to_pid_list (&stopped_pids, lwpid, status);
3002 /* Make sure we don't report an event for the exit of an LWP not in
3003 our list, i.e. not part of the current process. This can happen
3004 if we detach from a program we originally forked and then it exits. */
3006 if (!WIFSTOPPED (status) && !lp)
3009 /* This LWP is stopped now. (And if dead, this prevents it from
3010 ever being continued.) */
3011 lp->stopped = 1;
3013 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3015 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3016 int options = linux_nat_ptrace_options (inf->attach_flag);
3018 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
3019 lp->must_set_ptrace_flags = 0;
3022 /* Handle GNU/Linux's syscall SIGTRAPs. */
3023 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3025 /* No longer need the sysgood bit. The ptrace event ends up
3026 recorded in lp->waitstatus if we care for it. We can carry
3027 on handling the event like a regular SIGTRAP from here on. */
3029 status = W_STOPCODE (SIGTRAP);
3030 if (linux_handle_syscall_trap (lp, 0))
3035 /* Almost all other ptrace-stops are known to be outside of system
3036 calls, with further exceptions in linux_handle_extended_wait. */
3037 lp->syscall_state = TARGET_WAITKIND_IGNORE;
3040 /* Handle GNU/Linux's extended waitstatus for trace events. */
3041 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3042 && linux_is_extended_waitstatus (status))
3044 if (debug_linux_nat)
3045 fprintf_unfiltered (gdb_stdlog,
3046 "LLW: Handling extended status 0x%06x\n",
3048 if (linux_handle_extended_wait (lp, status))
3052 /* Check if the thread has exited. */
3053 if (WIFEXITED (status) || WIFSIGNALED (status))
3055 if (!report_thread_events
3056 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
3058 if (debug_linux_nat)
3059 fprintf_unfiltered (gdb_stdlog,
3060 "LLW: %s exited.\n",
3061 target_pid_to_str (lp->ptid));
3063 /* If there is at least one more LWP, then the exit signal
3064 was not the end of the debugged application and should be ignored. */
3070 /* Note that even if the leader was ptrace-stopped, it can still
3071 exit, if e.g., some other thread brings down the whole
3072 process (calls `exit'). So don't assert that the lwp is resumed. */
3074 if (debug_linux_nat)
3075 fprintf_unfiltered (gdb_stdlog,
3076 "LWP %ld exited (resumed=%d)\n",
3077 ptid_get_lwp (lp->ptid), lp->resumed);
3079 /* Dead LWP's aren't expected to report a pending sigstop. */
3082 /* Store the pending event in the waitstatus, because
3083 W_EXITCODE(0,0) == 0. */
3084 store_waitstatus (&lp->waitstatus, status);
3088 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3089 an attempt to stop an LWP. */
3090 if (lp->signalled
3091 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3095 if (lp->last_resume_kind == resume_stop)
3097 if (debug_linux_nat)
3098 fprintf_unfiltered (gdb_stdlog,
3099 "LLW: resume_stop SIGSTOP caught for %s.\n",
3100 target_pid_to_str (lp->ptid));
3104 /* This is a delayed SIGSTOP. Filter out the event. */
3106 if (debug_linux_nat)
3107 fprintf_unfiltered (gdb_stdlog,
3108 "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
3110 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3111 target_pid_to_str (lp->ptid));
3113 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3114 gdb_assert (lp->resumed);
3119 /* Make sure we don't report a SIGINT that we have already displayed
3120 for another thread. */
3121 if (lp->ignore_sigint
3122 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3124 if (debug_linux_nat)
3125 fprintf_unfiltered (gdb_stdlog,
3126 "LLW: Delayed SIGINT caught for %s.\n",
3127 target_pid_to_str (lp->ptid));
3129 /* This is a delayed SIGINT. */
3130 lp->ignore_sigint = 0;
3132 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3133 if (debug_linux_nat)
3134 fprintf_unfiltered (gdb_stdlog,
3135 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3137 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3138 target_pid_to_str (lp->ptid));
3139 gdb_assert (lp->resumed);
3141 /* Discard the event. */
3145 /* Don't report signals that GDB isn't interested in, such as
3146 signals that are neither printed nor stopped upon. Stopping all
3147 threads can be a bit time-consuming so if we want decent
3148 performance with heavily multi-threaded programs, especially when
3149 they're using a high frequency timer, we'd better avoid it if we can. */
3151 if (WIFSTOPPED (status))
3153 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3155 if (!target_is_non_stop_p ())
3157 /* Only do the below in all-stop, as we currently use SIGSTOP
3158 to implement target_stop (see linux_nat_stop) in
3160 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3162 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3163 forwarded to the entire process group, that is, all LWPs
3164 will receive it - unless they're using CLONE_THREAD to
3165 share signals. Since we only want to report it once, we
3166 mark it as ignored for all LWPs except this one. */
3167 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3168 set_ignore_sigint, NULL);
3169 lp->ignore_sigint = 0;
3172 maybe_clear_ignore_sigint (lp);
3175 /* When using hardware single-step, we need to report every signal.
3176 Otherwise, signals in pass_mask may be short-circuited
3177 except signals that might be caused by a breakpoint. */
3178 if (!lp->step
3179 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3180 && !linux_wstatus_maybe_breakpoint (status))
3182 linux_resume_one_lwp (lp, lp->step, signo);
3183 if (debug_linux_nat)
3184 fprintf_unfiltered (gdb_stdlog,
3185 "LLW: %s %s, %s (preempt 'handle')\n",
3187 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3188 target_pid_to_str (lp->ptid),
3189 (signo != GDB_SIGNAL_0
3190 ? strsignal (gdb_signal_to_host (signo))
3196 /* An interesting event. */
3198 lp->status = status;
3199 save_stop_reason (lp);
3203 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3204 their exits until all other threads in the group have exited. */
3207 check_zombie_leaders (void)
3209 struct inferior *inf;
3213 struct lwp_info *leader_lp;
3218 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3219 if (leader_lp != NULL
3220 /* Check if there are other threads in the group, as we may
3221 have raced with the inferior simply exiting. */
3222 && num_lwps (inf->pid) > 1
3223 && linux_proc_pid_is_zombie (inf->pid))
3225 if (debug_linux_nat)
3226 fprintf_unfiltered (gdb_stdlog,
3227 "CZL: Thread group leader %d zombie "
3228 "(it exited, or another thread execd).\n",
3231 /* A leader zombie can mean one of two things:
3233 - It exited, and there's an exit status pending
3234 available, or only the leader exited (not the whole
3235 program). In the latter case, we can't waitpid the
3236 leader's exit status until all other threads are gone.
3238 - There are 3 or more threads in the group, and a thread
3239 other than the leader exec'd. See comments on exec
3240 events at the top of the file. We could try
3241 distinguishing the exit and exec cases, by waiting once
3242 more, and seeing if something comes out, but it doesn't
3243 sound useful. The previous leader _does_ go away, and
3244 we'll re-add the new one once we see the exec event
3245 (which is just the same as what would happen if the
3246 previous leader did exit voluntarily before some other thread execs). */
3249 if (debug_linux_nat)
3250 fprintf_unfiltered (gdb_stdlog,
3251 "CZL: Thread group leader %d vanished.\n",
3253 exit_lwp (leader_lp);
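/* For illustration only; not part of GDB.  A sketch of how the situation
   handled above can be observed from outside the debugger: the leader PID
   is a zombie (see the /proc status check sketched earlier) while
   /proc/PID/task still lists more than one thread.  GDB itself consults its
   own LWP list (num_lwps) rather than /proc for the thread count.  */
#if 0
#include <dirent.h>
#include <stdio.h>

static int
count_tasks_sketch (int pid)
{
  char path[64];
  DIR *dir;
  struct dirent *entry;
  int count = 0;

  snprintf (path, sizeof (path), "/proc/%d/task", pid);
  dir = opendir (path);
  if (dir == NULL)
    return -1;

  while ((entry = readdir (dir)) != NULL)
    if (entry->d_name[0] != '.')	/* Skip "." and "..".  */
      count++;

  closedir (dir);
  return count;
}
#endif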
3258 /* Convenience function that is called when the kernel reports an exit
3259 event. This decides whether to report the event to GDB as a
3260 process exit event, a thread exit event, or to suppress the event. */
3264 filter_exit_event (struct lwp_info *event_child,
3265 struct target_waitstatus *ourstatus)
3267 ptid_t ptid = event_child->ptid;
3269 if (num_lwps (ptid_get_pid (ptid)) > 1)
3271 if (report_thread_events)
3272 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3274 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3276 exit_lwp (event_child);
3283 linux_nat_wait_1 (ptid_t ptid, struct target_waitstatus *ourstatus, int target_options)
3287 enum resume_kind last_resume_kind;
3288 struct lwp_info *lp;
3291 if (debug_linux_nat)
3292 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3294 /* The first time we get here after starting a new inferior, we may
3295 not have added it to the LWP list yet - this is the earliest
3296 moment at which we know its PID. */
3297 if (ptid_is_pid (inferior_ptid))
3299 /* Upgrade the main thread's ptid. */
3300 thread_change_ptid (inferior_ptid,
3301 ptid_build (ptid_get_pid (inferior_ptid),
3302 ptid_get_pid (inferior_ptid), 0));
3304 lp = add_initial_lwp (inferior_ptid);
3308 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3309 block_child_signals (&prev_mask);
3311 /* First check if there is a LWP with a wait status pending. */
3312 lp = iterate_over_lwps (ptid, status_callback, NULL);
3315 if (debug_linux_nat)
3316 fprintf_unfiltered (gdb_stdlog,
3317 "LLW: Using pending wait status %s for %s.\n",
3318 status_to_str (lp->status),
3319 target_pid_to_str (lp->ptid));
3322 /* But if we don't find a pending event, we'll have to wait. Always
3323 pull all events out of the kernel. We'll randomly select an
3324 event LWP out of all that have events, to prevent starvation. */
3330 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace quirks:
3333 - If the thread group leader exits while other threads in the
3334 thread group still exist, waitpid(TGID, ...) hangs. That
3335 waitpid won't return an exit status until the other threads
3336 in the group are reaped.
3338 - When a non-leader thread execs, that thread just vanishes
3339 without reporting an exit (so we'd hang if we waited for it
3340 explicitly in that case). The exec event is reported to the TGID pid. */
3344 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
3346 if (debug_linux_nat)
3347 fprintf_unfiltered (gdb_stdlog,
3348 "LNW: waitpid(-1, ...) returned %d, %s\n",
3349 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3353 if (debug_linux_nat)
3355 fprintf_unfiltered (gdb_stdlog,
3356 "LLW: waitpid %ld received %s\n",
3357 (long) lwpid, status_to_str (status));
3360 linux_nat_filter_event (lwpid, status);
3361 /* Retry until nothing comes out of waitpid. A single
3362 SIGCHLD can indicate more than one child stopped. */
3366 /* Now that we've pulled all events out of the kernel, resume
3367 LWPs that don't have an interesting event to report. */
3368 iterate_over_lwps (minus_one_ptid,
3369 resume_stopped_resumed_lwps, &minus_one_ptid);
3371 /* ... and find an LWP with a status to report to the core, if any. */
3373 lp = iterate_over_lwps (ptid, status_callback, NULL);
3377 /* Check for zombie thread group leaders. Those can't be reaped
3378 until all other threads in the thread group are. */
3379 check_zombie_leaders ();
3381 /* If there are no resumed children left, bail. We'd be stuck
3382 forever in the sigsuspend call below otherwise. */
3383 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3385 if (debug_linux_nat)
3386 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3388 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3390 restore_child_signals_mask (&prev_mask);
3391 return minus_one_ptid;
3394 /* No interesting event to report to the core. */
3396 if (target_options & TARGET_WNOHANG)
3398 if (debug_linux_nat)
3399 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3401 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3402 restore_child_signals_mask (&prev_mask);
3403 return minus_one_ptid;
3406 /* We shouldn't end up here unless we want to try again. */
3407 gdb_assert (lp == NULL);
3409 /* Block until we get an event reported with SIGCHLD. */
3415 status = lp->status;
3418 if (!target_is_non_stop_p ())
3420 /* Now stop all other LWP's ... */
3421 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3423 /* ... and wait until all of them have reported back that
3424 they're no longer running. */
3425 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3428 /* If we're not waiting for a specific LWP, choose an event LWP from
3429 among those that have had events. Giving equal priority to all
3430 LWPs that have had events helps prevent starvation. */
3431 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3432 select_event_lwp (ptid, &lp, &status);
3434 gdb_assert (lp != NULL);
3436 /* Now that we've selected our final event LWP, un-adjust its PC if
3437 it was a software breakpoint, and we can't reliably support the
3438 "stopped by software breakpoint" stop reason. */
3439 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3440 && !USE_SIGTRAP_SIGINFO)
3442 struct regcache *regcache = get_thread_regcache (lp->ptid);
3443 struct gdbarch *gdbarch = regcache->arch ();
3444 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
3450 pc = regcache_read_pc (regcache);
3451 regcache_write_pc (regcache, pc + decr_pc);
3455 /* We'll need this to determine whether to report a SIGSTOP as
3456 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback clears it. */
3458 last_resume_kind = lp->last_resume_kind;
3460 if (!target_is_non_stop_p ())
3462 /* In all-stop, from the core's perspective, all LWPs are now
3463 stopped until a new resume action is sent over. */
3464 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3468 resume_clear_callback (lp, NULL);
3471 if (linux_nat_status_is_event (status))
3473 if (debug_linux_nat)
3474 fprintf_unfiltered (gdb_stdlog,
3475 "LLW: trap ptid is %s.\n",
3476 target_pid_to_str (lp->ptid));
3479 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3481 *ourstatus = lp->waitstatus;
3482 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3484 else
3485 store_waitstatus (ourstatus, status);
3487 if (debug_linux_nat)
3488 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3490 restore_child_signals_mask (&prev_mask);
3492 if (last_resume_kind == resume_stop
3493 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3494 && WSTOPSIG (status) == SIGSTOP)
3496 /* A thread that has been requested to stop by GDB with
3497 target_stop, and it stopped cleanly, so report as SIG0. The
3498 use of SIGSTOP is an implementation detail. */
3499 ourstatus->value.sig = GDB_SIGNAL_0;
3502 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3503 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3504 lp->core = -1;
3505 else
3506 lp->core = linux_common_core_of_thread (lp->ptid);
3508 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3509 return filter_exit_event (lp, ourstatus);
3514 /* Resume LWPs that are currently stopped without any pending status
3515 to report, but are resumed from the core's perspective. */
3518 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3520 ptid_t *wait_ptid_p = (ptid_t *) data;
3522 if (!lp->stopped)
3524 if (debug_linux_nat)
3525 fprintf_unfiltered (gdb_stdlog,
3526 "RSRL: NOT resuming LWP %s, not stopped\n",
3527 target_pid_to_str (lp->ptid));
3529 else if (!lp->resumed)
3531 if (debug_linux_nat)
3532 fprintf_unfiltered (gdb_stdlog,
3533 "RSRL: NOT resuming LWP %s, not resumed\n",
3534 target_pid_to_str (lp->ptid));
3536 else if (lwp_status_pending_p (lp))
3538 if (debug_linux_nat)
3539 fprintf_unfiltered (gdb_stdlog,
3540 "RSRL: NOT resuming LWP %s, has pending status\n",
3541 target_pid_to_str (lp->ptid));
3545 struct regcache *regcache = get_thread_regcache (lp->ptid);
3546 struct gdbarch *gdbarch = regcache->arch ();
3550 CORE_ADDR pc = regcache_read_pc (regcache);
3551 int leave_stopped = 0;
3553 /* Don't bother if there's a breakpoint at PC that we'd hit
3554 immediately, and we're not waiting for this LWP. */
3555 if (!ptid_match (lp->ptid, *wait_ptid_p))
3557 if (breakpoint_inserted_here_p (regcache->aspace (), pc))
3563 if (debug_linux_nat)
3564 fprintf_unfiltered (gdb_stdlog,
3565 "RSRL: resuming stopped-resumed LWP %s at "
3567 target_pid_to_str (lp->ptid),
3568 paddress (gdbarch, pc),
3571 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3574 CATCH (ex, RETURN_MASK_ERROR)
3576 if (!check_ptrace_stopped_lwp_gone (lp))
3577 throw_exception (ex);
3586 linux_nat_target::wait (ptid_t ptid, struct target_waitstatus *ourstatus, int target_options)
3591 if (debug_linux_nat)
3593 char *options_string;
3595 options_string = target_options_to_string (target_options);
3596 fprintf_unfiltered (gdb_stdlog,
3597 "linux_nat_wait: [%s], [%s]\n",
3598 target_pid_to_str (ptid),
3600 xfree (options_string);
3603 /* Flush the async file first. */
3604 if (target_is_async_p ())
3605 async_file_flush ();
3607 /* Resume LWPs that are currently stopped without any pending status
3608 to report, but are resumed from the core's perspective. LWPs get
3609 in this state if we find them stopping at a time we're not
3610 interested in reporting the event (target_wait on a
3611 specific_process, for example, see linux_nat_wait_1), and
3612 meanwhile the event became uninteresting. Don't bother resuming
3613 LWPs we're not going to wait for if they'd stop immediately. */
3614 if (target_is_non_stop_p ())
3615 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3617 event_ptid = linux_nat_wait_1 (ptid, ourstatus, target_options);
3619 /* If we requested any event, and something came out, assume there
3620 may be more. If we requested a specific lwp or process, also
3621 assume there may be more. */
3622 if (target_is_async_p ()
3623 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3624 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3625 || !ptid_equal (ptid, minus_one_ptid)))
3634 kill_one_lwp (pid_t pid)
3636 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3639 kill_lwp (pid, SIGKILL);
3640 if (debug_linux_nat)
3642 int save_errno = errno;
3644 fprintf_unfiltered (gdb_stdlog,
3645 "KC: kill (SIGKILL) %ld, 0, 0 (%s)\n", (long) pid,
3646 save_errno ? safe_strerror (save_errno) : "OK");
3649 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3652 ptrace (PTRACE_KILL, pid, 0, 0);
3653 if (debug_linux_nat)
3655 int save_errno = errno;
3657 fprintf_unfiltered (gdb_stdlog,
3658 "KC: PTRACE_KILL %ld, 0, 0 (%s)\n", (long) pid,
3659 save_errno ? safe_strerror (save_errno) : "OK");
3663 /* Wait for an LWP to die. */
3666 kill_wait_one_lwp (pid_t pid)
3670 /* We must make sure that there are no pending events (delayed
3671 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3672 program doesn't interfere with any following debugging session. */
3676 res = my_waitpid (pid, NULL, __WALL);
3677 if (res != (pid_t) -1)
3679 if (debug_linux_nat)
3680 fprintf_unfiltered (gdb_stdlog,
3681 "KWC: wait %ld received unknown.\n",
3683 /* The Linux kernel sometimes fails to kill a thread
3684 completely after PTRACE_KILL; that goes from the stop
3685 point in do_fork out to the one in get_signal_to_deliver
3686 and waits again. So kill it again. */
3692 gdb_assert (res == -1 && errno == ECHILD);
3695 /* Callback for iterate_over_lwps. */
3698 kill_callback (struct lwp_info *lp, void *data)
3700 kill_one_lwp (ptid_get_lwp (lp->ptid));
3704 /* Callback for iterate_over_lwps. */
3707 kill_wait_callback (struct lwp_info *lp, void *data)
3709 kill_wait_one_lwp (ptid_get_lwp (lp->ptid));
3713 /* Kill the fork children of any threads of inferior INF that are
3714 stopped at a fork event. */
3717 kill_unfollowed_fork_children (struct inferior *inf)
3719 struct thread_info *thread;
3721 ALL_NON_EXITED_THREADS (thread)
3722 if (thread->inf == inf)
3724 struct target_waitstatus *ws = &thread->pending_follow;
3726 if (ws->kind == TARGET_WAITKIND_FORKED
3727 || ws->kind == TARGET_WAITKIND_VFORKED)
3729 ptid_t child_ptid = ws->value.related_pid;
3730 int child_pid = ptid_get_pid (child_ptid);
3731 int child_lwp = ptid_get_lwp (child_ptid);
3733 kill_one_lwp (child_lwp);
3734 kill_wait_one_lwp (child_lwp);
3736 /* Let the arch-specific native code know this process is gone. */
3738 linux_nat_forget_process (child_pid);
3744 linux_nat_target::kill ()
3746 /* If we're stopped while forking and we haven't followed yet,
3747 kill the other task. We need to do this first because the
3748 parent will be sleeping if this is a vfork. */
3749 kill_unfollowed_fork_children (current_inferior ());
3751 if (forks_exist_p ())
3752 linux_fork_killall ();
3755 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3757 /* Stop all threads before killing them, since ptrace requires
3758 that the thread is stopped to successfully PTRACE_KILL. */
3759 iterate_over_lwps (ptid, stop_callback, NULL);
3760 /* ... and wait until all of them have reported back that
3761 they're no longer running. */
3762 iterate_over_lwps (ptid, stop_wait_callback, NULL);
3764 /* Kill all LWP's ... */
3765 iterate_over_lwps (ptid, kill_callback, NULL);
3767 /* ... and wait until we've flushed all events. */
3768 iterate_over_lwps (ptid, kill_wait_callback, NULL);
3771 target_mourn_inferior (inferior_ptid);
3775 linux_nat_target::mourn_inferior ()
3777 int pid = ptid_get_pid (inferior_ptid);
3779 purge_lwp_list (pid);
3781 if (! forks_exist_p ())
3782 /* Normal case, no other forks available. */
3783 inf_ptrace_target::mourn_inferior ();
3784 else
3785 /* Multi-fork case. The current inferior_ptid has exited, but
3786 there are other viable forks to debug. Delete the exiting
3787 one and context-switch to the first available. */
3788 linux_fork_mourn_inferior ();
3790 /* Let the arch-specific native code know this process is gone. */
3791 linux_nat_forget_process (pid);
3794 /* Convert a native/host siginfo object, into/from the siginfo in the
3795 layout of the inferiors' architecture. */
3798 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3800 int done = 0;
3802 if (linux_nat_siginfo_fixup != NULL)
3803 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3805 /* If there was no callback, or the callback didn't do anything,
3806 then just do a straight memcpy. */
3807 if (!done)
3809 if (direction == 1)
3810 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3811 else
3812 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3816 static enum target_xfer_status
3817 linux_xfer_siginfo (enum target_object object,
3818 const char *annex, gdb_byte *readbuf,
3819 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3820 ULONGEST *xfered_len)
3824 gdb_byte inf_siginfo[sizeof (siginfo_t)];
3826 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3827 gdb_assert (readbuf || writebuf);
3829 pid = ptid_get_lwp (inferior_ptid);
3830 if (pid == 0)
3831 pid = ptid_get_pid (inferior_ptid);
3833 if (offset > sizeof (siginfo))
3834 return TARGET_XFER_E_IO;
3837 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3838 if (errno != 0)
3839 return TARGET_XFER_E_IO;
3841 /* When GDB is built as a 64-bit application, ptrace writes into
3842 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3843 inferior with a 64-bit GDB should look the same as debugging it
3844 with a 32-bit GDB, we need to convert it. GDB core always sees
3845 the converted layout, so any read/write will have to be done post-conversion. */
3847 siginfo_fixup (&siginfo, inf_siginfo, 0);
3849 if (offset + len > sizeof (siginfo))
3850 len = sizeof (siginfo) - offset;
3852 if (readbuf != NULL)
3853 memcpy (readbuf, inf_siginfo + offset, len);
3854 else
3856 memcpy (inf_siginfo + offset, writebuf, len);
3858 /* Convert back to ptrace layout before flushing it out. */
3859 siginfo_fixup (&siginfo, inf_siginfo, 1);
3862 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3863 if (errno != 0)
3864 return TARGET_XFER_E_IO;
3868 return TARGET_XFER_OK;
3871 static enum target_xfer_status
3872 linux_nat_xfer_osdata (enum target_object object,
3873 const char *annex, gdb_byte *readbuf,
3874 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3875 ULONGEST *xfered_len);
3877 static enum target_xfer_status
3878 linux_proc_xfer_spu (enum target_object object,
3879 const char *annex, gdb_byte *readbuf,
3880 const gdb_byte *writebuf,
3881 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len);
3883 static enum target_xfer_status
3884 linux_proc_xfer_partial (enum target_object object,
3885 const char *annex, gdb_byte *readbuf,
3886 const gdb_byte *writebuf,
3887 ULONGEST offset, LONGEST len, ULONGEST *xfered_len);
3889 enum target_xfer_status
3890 linux_nat_target::xfer_partial (enum target_object object,
3891 const char *annex, gdb_byte *readbuf,
3892 const gdb_byte *writebuf,
3893 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3895 enum target_xfer_status xfer;
3897 if (object == TARGET_OBJECT_SIGNAL_INFO)
3898 return linux_xfer_siginfo (object, annex, readbuf, writebuf,
3899 offset, len, xfered_len);
3901 /* The target is connected but no live inferior is selected. Pass
3902 this request down to a lower stratum (e.g., the executable file). */
3904 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3905 return TARGET_XFER_EOF;
3907 if (object == TARGET_OBJECT_AUXV)
3908 return memory_xfer_auxv (this, object, annex, readbuf, writebuf,
3909 offset, len, xfered_len);
3911 if (object == TARGET_OBJECT_OSDATA)
3912 return linux_nat_xfer_osdata (object, annex, readbuf, writebuf,
3913 offset, len, xfered_len);
3915 if (object == TARGET_OBJECT_SPU)
3916 return linux_proc_xfer_spu (object, annex, readbuf, writebuf,
3917 offset, len, xfered_len);
3919 /* GDB calculates all addresses in the largest possible address width.
3921 The address width must be masked before its final use - either by
3922 linux_proc_xfer_partial or inf_ptrace_target::xfer_partial.
3924 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
3926 if (object == TARGET_OBJECT_MEMORY)
3928 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
3930 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
3931 offset &= ((ULONGEST) 1 << addr_bit) - 1;
3934 xfer = linux_proc_xfer_partial (object, annex, readbuf, writebuf,
3935 offset, len, xfered_len);
3936 if (xfer != TARGET_XFER_EOF)
3937 return xfer;
3939 return inf_ptrace_target::xfer_partial (object, annex, readbuf, writebuf,
3940 offset, len, xfered_len);
3944 linux_nat_target::thread_alive (ptid_t ptid)
3946 /* As long as a PTID is in lwp list, consider it alive. */
3947 return find_lwp_pid (ptid) != NULL;
3950 /* Implement the to_update_thread_list target method for this target. */
3954 linux_nat_target::update_thread_list ()
3956 struct lwp_info *lwp;
3958 /* We add/delete threads from the list as clone/exit events are
3959 processed, so just try deleting exited threads still in the thread list. */
3961 delete_exited_threads ();
3963 /* Update the processor core that each lwp/thread was last seen running on. */
3965 ALL_LWPS (lwp)
3967 /* Avoid accessing /proc if the thread hasn't run since the last
3968 time we fetched the thread's core. Accessing /proc becomes
3969 noticeably expensive when we have thousands of LWPs. */
3970 if (lwp->core == -1)
3971 lwp->core = linux_common_core_of_thread (lwp->ptid);
3976 linux_nat_target::pid_to_str (ptid_t ptid)
3978 static char buf[64];
3980 if (ptid_lwp_p (ptid)
3981 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3982 || num_lwps (ptid_get_pid (ptid)) > 1))
3984 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
3988 return normal_pid_to_str (ptid);
3992 linux_nat_target::thread_name (struct thread_info *thr)
3994 return linux_proc_tid_get_name (thr->ptid);
3997 /* Accepts an integer PID; returns a string representing a file that
3998 can be opened to get the symbols for the child process. */
4001 linux_nat_target::pid_to_exec_file (int pid)
4003 return linux_proc_pid_to_exec_file (pid);
4006 /* Implement the to_xfer_partial target method using /proc/<pid>/mem.
4007 Because we can use a single read/write call, this can be much more
4008 efficient than banging away at PTRACE_PEEKTEXT. */
4010 static enum target_xfer_status
4011 linux_proc_xfer_partial (enum target_object object,
4012 const char *annex, gdb_byte *readbuf,
4013 const gdb_byte *writebuf,
4014 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
4020 if (object != TARGET_OBJECT_MEMORY)
4021 return TARGET_XFER_EOF;
4023 /* Don't bother for one word. */
4024 if (len < 3 * sizeof (long))
4025 return TARGET_XFER_EOF;
4027 /* We could keep this file open and cache it - possibly one per
4028 thread. That requires some juggling, but is even faster. */
4029 xsnprintf (filename, sizeof filename, "/proc/%ld/mem",
4030 ptid_get_lwp (inferior_ptid));
4031 fd = gdb_open_cloexec (filename, ((readbuf ? O_RDONLY : O_WRONLY)
4032 | O_LARGEFILE), 0);
4033 if (fd == -1)
4034 return TARGET_XFER_EOF;
4036 /* Use pread64/pwrite64 if available, since they save a syscall and can
4037 handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
4038 debugging a SPARC64 application). */
4039 #ifdef HAVE_PREAD64
4040 ret = (readbuf ? pread64 (fd, readbuf, len, offset)
4041 : pwrite64 (fd, writebuf, len, offset));
4042 #else
4043 ret = lseek (fd, offset, SEEK_SET);
4044 if (ret != -1)
4045 ret = (readbuf ? read (fd, readbuf, len)
4046 : write (fd, writebuf, len));
4047 #endif
4049 close (fd);
4051 if (ret == -1 || ret == 0)
4052 return TARGET_XFER_EOF;
4053 else
4055 *xfered_len = ret;
4056 return TARGET_XFER_OK;
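/* For illustration only; not part of GDB.  A minimal standalone sketch of
   the /proc/PID/mem access pattern used by linux_proc_xfer_partial above:
   open the file and use pread64 so the target address doubles as the file
   offset.  The target must already be ptrace-stopped for the read to
   succeed; error handling is minimal and the function name is
   hypothetical.  */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_inferior_memory_sketch (pid_t pid, unsigned long long addr,
			     void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t ret;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  ret = pread64 (fd, buf, len, addr);
  close (fd);
  return ret;
}
#endif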
4061 /* Enumerate spufs IDs for process PID. */
4063 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
4065 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
4067 LONGEST written = 0;
4070 struct dirent *entry;
4072 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4073 dir = opendir (path);
4078 while ((entry = readdir (dir)) != NULL)
4084 fd = atoi (entry->d_name);
4088 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4089 if (stat (path, &st) != 0)
4091 if (!S_ISDIR (st.st_mode))
4094 if (statfs (path, &stfs) != 0)
4096 if (stfs.f_type != SPUFS_MAGIC)
4099 if (pos >= offset && pos + 4 <= offset + len)
4101 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4111 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4112 object type, using the /proc file system. */
4114 static enum target_xfer_status
4115 linux_proc_xfer_spu (enum target_object object,
4116 const char *annex, gdb_byte *readbuf,
4117 const gdb_byte *writebuf,
4118 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
4123 int pid = ptid_get_lwp (inferior_ptid);
4128 return TARGET_XFER_E_IO;
4131 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4134 return TARGET_XFER_E_IO;
4136 return TARGET_XFER_EOF;
4139 *xfered_len = (ULONGEST) l;
4140 return TARGET_XFER_OK;
4145 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4146 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
4148 return TARGET_XFER_E_IO;
4150 if (offset != 0
4151 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4154 return TARGET_XFER_EOF;
4157 if (writebuf)
4158 ret = write (fd, writebuf, (size_t) len);
4159 else
4160 ret = read (fd, readbuf, (size_t) len);
4162 close (fd);
4164 if (ret < 0)
4165 return TARGET_XFER_E_IO;
4166 else if (ret == 0)
4167 return TARGET_XFER_EOF;
4170 *xfered_len = (ULONGEST) ret;
4171 return TARGET_XFER_OK;
4176 /* Parse LINE as a signal set and add its set bits to SIGS. */
4179 add_line_to_sigset (const char *line, sigset_t *sigs)
4181 int len = strlen (line) - 1;
4185 if (line[len] != '\n')
4186 error (_("Could not parse signal set: %s"), line);
4194 if (*p >= '0' && *p <= '9')
4195 digit = *p - '0';
4196 else if (*p >= 'a' && *p <= 'f')
4197 digit = *p - 'a' + 10;
4198 else
4199 error (_("Could not parse signal set: %s"), line);
4201 signum -= 4;
4203 if (digit & 1)
4204 sigaddset (sigs, signum + 1);
4205 if (digit & 2)
4206 sigaddset (sigs, signum + 2);
4207 if (digit & 4)
4208 sigaddset (sigs, signum + 3);
4209 if (digit & 8)
4210 sigaddset (sigs, signum + 4);
4216 /* Find process PID's pending signals from /proc/pid/status and set SIGS to match. */
4220 linux_proc_pending_signals (int pid, sigset_t *pending,
4221 sigset_t *blocked, sigset_t *ignored)
4223 char buffer[PATH_MAX], fname[PATH_MAX];
4225 sigemptyset (pending);
4226 sigemptyset (blocked);
4227 sigemptyset (ignored);
4228 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4229 gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
4230 if (procfile == NULL)
4231 error (_("Could not open %s"), fname);
4233 while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
4235 /* Normal queued signals are on the SigPnd line in the status
4236 file. However, 2.6 kernels also have a "shared" pending
4237 queue for delivering signals to a thread group, so check for a ShdPnd line also.
4240 Unfortunately some Red Hat kernels include the shared pending
4241 queue but not the ShdPnd status field. */
4243 if (startswith (buffer, "SigPnd:\t"))
4244 add_line_to_sigset (buffer + 8, pending);
4245 else if (startswith (buffer, "ShdPnd:\t"))
4246 add_line_to_sigset (buffer + 8, pending);
4247 else if (startswith (buffer, "SigBlk:\t"))
4248 add_line_to_sigset (buffer + 8, blocked);
4249 else if (startswith (buffer, "SigIgn:\t"))
4250 add_line_to_sigset (buffer + 8, ignored);
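/* For illustration only; not part of GDB.  A small worked example of the
   mask format parsed by add_line_to_sigset above: each SigPnd/ShdPnd/
   SigBlk/SigIgn line is a hexadecimal mask whose bit N-1 corresponds to
   signal N.  For example, a mask of 0x102 has bits 1 and 8 set, i.e.
   SIGINT (2) and SIGKILL (9).  */
#if 0
#include <assert.h>
#include <signal.h>

static void
sigmask_line_example (void)
{
  unsigned long long mask = 0x102;	/* As read from "SigPnd: ...0102".  */

  assert ((mask & (1ULL << (SIGINT - 1))) != 0);	/* SIGINT == 2.  */
  assert ((mask & (1ULL << (SIGKILL - 1))) != 0);	/* SIGKILL == 9.  */
  assert ((mask & (1ULL << (SIGTERM - 1))) == 0);	/* SIGTERM == 15.  */
}
#endif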
4254 static enum target_xfer_status
4255 linux_nat_xfer_osdata (enum target_object object,
4256 const char *annex, gdb_byte *readbuf,
4257 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4258 ULONGEST *xfered_len)
4260 gdb_assert (object == TARGET_OBJECT_OSDATA);
4262 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4263 if (*xfered_len == 0)
4264 return TARGET_XFER_EOF;
4266 return TARGET_XFER_OK;
4270 cleanup_target_stop (void *arg)
4272 ptid_t *ptid = (ptid_t *) arg;
4274 gdb_assert (arg != NULL);
4277 target_continue_no_signal (*ptid);
4280 std::vector<static_tracepoint_marker>
4281 linux_nat_target::static_tracepoint_markers_by_strid (const char *strid)
4283 char s[IPA_CMD_BUF_SIZE];
4284 struct cleanup *old_chain;
4285 int pid = ptid_get_pid (inferior_ptid);
4286 std::vector<static_tracepoint_marker> markers;
4288 ptid_t ptid = ptid_build (pid, 0, 0);
4289 static_tracepoint_marker marker;
4294 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4295 s[sizeof ("qTfSTM")] = 0;
4297 agent_run_command (pid, s, strlen (s) + 1);
4299 old_chain = make_cleanup (cleanup_target_stop, &ptid);
4305 parse_static_tracepoint_marker_definition (p, &p, &marker);
4307 if (strid == NULL || marker.str_id == strid)
4308 markers.push_back (std::move (marker));
4310 while (*p++ == ','); /* comma-separated list */
4312 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4313 s[sizeof ("qTsSTM")] = 0;
4314 agent_run_command (pid, s, strlen (s) + 1);
4318 do_cleanups (old_chain);
4323 /* target_is_async_p implementation. */
4326 linux_nat_target::is_async_p ()
4328 return linux_is_async_p ();
4331 /* target_can_async_p implementation. */
4334 linux_nat_target::can_async_p ()
4336 /* We're always async, unless the user explicitly prevented it with the
4337 "maint set target-async" command. */
4338 return target_async_permitted;
4342 linux_nat_target::supports_non_stop ()
4347 /* to_always_non_stop_p implementation. */
4350 linux_nat_target::always_non_stop_p ()
4355 /* True if we want to support multi-process. To be removed when GDB
4356 supports multi-exec. */
4358 int linux_multi_process = 1;
4361 linux_nat_target::supports_multi_process ()
4363 return linux_multi_process;
4367 linux_nat_target::supports_disable_randomization ()
4369 #ifdef HAVE_PERSONALITY
4376 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4377 it lets us notice when any child changes state and notify the
4378 event loop; in sync mode, it allows us to use sigsuspend in
4379 linux_nat_wait_1 above to wait for the arrival of a SIGCHLD. */
4382 sigchld_handler (int signo)
4384 int old_errno = errno;
4386 if (debug_linux_nat)
4387 ui_file_write_async_safe (gdb_stdlog,
4388 "sigchld\n", sizeof ("sigchld\n") - 1);
4390 if (signo == SIGCHLD
4391 && linux_nat_event_pipe[0] != -1)
4392 async_file_mark (); /* Let the event loop know that there are
4393 events to handle. */

/* Callback registered with the target events file descriptor.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
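
/* For orientation, the async notification path sketched end to end (a
   summary of the code above and below, not new machinery):
   sigchld_handler -> async_file_mark () writes a byte into
   linux_nat_event_pipe[1]; the event loop, watching
   linux_nat_event_pipe[0] via add_file_handler, wakes up and calls
   handle_target_event, which hands control to inferior_event_handler.  */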

/* Create/destroy the target events pipe.  Returns previous state.  */

static int
linux_async_pipe (int enable)
{
  int previous = linux_is_async_p ();

  if (previous != enable)
    {
      sigset_t prev_mask;

      /* Block child signals while we create/destroy the pipe, as
	 their handler writes to it.  */
      block_child_signals (&prev_mask);

      if (enable)
	{
	  if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
	    internal_error (__FILE__, __LINE__,
			    "creating event pipe failed.");

	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
	}
      else
	{
	  close (linux_nat_event_pipe[0]);
	  close (linux_nat_event_pipe[1]);
	  linux_nat_event_pipe[0] = -1;
	  linux_nat_event_pipe[1] = -1;
	}

      restore_child_signals_mask (&prev_mask);
    }

  return previous;
}

/* target_async implementation.  */

void
linux_nat_target::async (int enable)
{
  if (enable)
    {
      if (!linux_async_pipe (1))
	{
	  add_file_handler (linux_nat_event_pipe[0],
			    handle_target_event, NULL);
	  /* There may be pending events to handle.  Tell the event loop
	     to poll them.  */
	  async_file_mark ();
	}
    }
  else
    {
      delete_file_handler (linux_nat_event_pipe[0]);
      linux_async_pipe (0);
    }
}

/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
   event came out.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));

      if (lwp->last_resume_kind == resume_stop)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"linux-nat: already stopping LWP %ld at "
				"GDB's request\n",
				ptid_get_lwp (lwp->ptid));
	  return 0;
	}

      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}

void
linux_nat_target::stop (ptid_t ptid)
{
  iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
}

void
linux_nat_target::close ()
{
  /* Unregister from the event loop.  */
  if (is_async_p ())
    async (0);

  inf_ptrace_target::close ();
}

/* When requests are passed down from the linux-nat layer to the
   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
   used.  The address space pointer is stored in the inferior object,
   but the common code that is passed such ptid can't tell whether
   lwpid is a "main" process id or not (it assumes so).  We reverse
   look up the "main" process id from the lwp here.  */
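
/* For example (numbers purely illustrative): with a thread group leader
   1234 and a second LWP 1235, this layer uses ptids (1234, 1234, 0) and
   (1234, 1235, 0), while the layer below may hand back (1235, 0, 0);
   the code below maps that bare lwpid back to inferior 1234.  */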

struct address_space *
linux_nat_target::thread_address_space (ptid_t ptid)
{
  struct lwp_info *lwp;
  struct inferior *inf;
  int pid;

  if (ptid_get_lwp (ptid) == 0)
    {
      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
	 tgid.  */
      lwp = find_lwp_pid (ptid);
      pid = ptid_get_pid (lwp->ptid);
    }
  else
    {
      /* A (pid,lwpid,0) ptid.  */
      pid = ptid_get_pid (ptid);
    }

  inf = find_inferior_pid (pid);
  gdb_assert (inf != NULL);
  return inf->aspace;
}

/* Return the cached value of the processor core for thread PTID.  */

int
linux_nat_target::core_of_thread (ptid_t ptid)
{
  struct lwp_info *info = find_lwp_pid (ptid);

  if (info != NULL)
    return info->core;
  return -1;
}

/* Implementation of to_filesystem_is_local.  */

bool
linux_nat_target::filesystem_is_local ()
{
  struct inferior *inf = current_inferior ();

  if (inf->fake_pid_p || inf->pid == 0)
    return true;

  return linux_ns_same (inf->pid, LINUX_NS_MNT);
}
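
/* Implementation note (summarizing nat/linux-namespaces.c; see that
   file for the authoritative logic): linux_ns_same compares the mount
   namespace of INF->pid with GDB's own, essentially by examining the
   /proc/PID/ns/mnt links, so "local" here means "same mount namespace
   as GDB".  */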

/* Convert the INF argument passed to a to_fileio_* method
   to a process ID suitable for passing to its corresponding
   linux_mntns_* function.  If INF is non-NULL then the
   caller is requesting the filesystem seen by INF.  If INF
   is NULL then the caller is requesting the filesystem seen
   by GDB itself.  We fall back to GDB's own filesystem in the
   case that INF is non-NULL but its PID is unknown.  */

static pid_t
linux_nat_fileio_pid_of (struct inferior *inf)
{
  if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
    return getpid ();
  else
    return inf->pid;
}

/* Implementation of to_fileio_open.  */

int
linux_nat_target::fileio_open (struct inferior *inf, const char *filename,
			       int flags, int mode, int warn_if_slow,
			       int *target_errno)
{
  int nat_flags;
  mode_t nat_mode;
  int fd;

  if (fileio_to_host_openflags (flags, &nat_flags) == -1
      || fileio_to_host_mode (mode, &nat_mode) == -1)
    {
      *target_errno = FILEIO_EINVAL;
      return -1;
    }

  fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
				 filename, nat_flags, nat_mode);
  if (fd == -1)
    *target_errno = host_to_fileio_error (errno);

  return fd;
}

/* Implementation of to_fileio_readlink.  */

gdb::optional<std::string>
linux_nat_target::fileio_readlink (struct inferior *inf, const char *filename,
				   int *target_errno)
{
  char buf[PATH_MAX];
  int len;

  len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
			      filename, buf, sizeof (buf));
  if (len < 0)
    {
      *target_errno = host_to_fileio_error (errno);
      return {};
    }

  return std::string (buf, len);
}

/* Implementation of to_fileio_unlink.  */

int
linux_nat_target::fileio_unlink (struct inferior *inf, const char *filename,
				 int *target_errno)
{
  int ret;

  ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
			    filename);
  if (ret == -1)
    *target_errno = host_to_fileio_error (errno);

  return ret;
}

/* Implementation of the to_thread_events method.  */

void
linux_nat_target::thread_events (int enable)
{
  report_thread_events = enable;
}

linux_nat_target::linux_nat_target ()
{
  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */
}

/* Register a method to call whenever a new thread is attached.  */

void
linux_nat_set_new_thread (struct target_ops *t,
			  void (*new_thread) (struct lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}

/* Register a method to call whenever a thread's arch-specific data is
   about to be deleted.  */

void
linux_nat_set_delete_thread (struct target_ops *t,
			     void (*delete_thread) (struct arch_lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_delete_thread = delete_thread;
}

/* See declaration in linux-nat.h.  */

void
linux_nat_set_new_fork (struct target_ops *t,
			linux_nat_new_fork_ftype *new_fork)
{
  /* Save the pointer.  */
  linux_nat_new_fork = new_fork;
}

/* See declaration in linux-nat.h.  */

void
linux_nat_set_forget_process (struct target_ops *t,
			      linux_nat_forget_process_ftype *fn)
{
  /* Save the pointer.  */
  linux_nat_forget_process_hook = fn;
}

/* See declaration in linux-nat.h.  */

void
linux_nat_forget_process (pid_t pid)
{
  if (linux_nat_forget_process_hook != NULL)
    linux_nat_forget_process_hook (pid);
}

/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */

void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (siginfo_t *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}

/* Register a method to call prior to resuming a thread.  */

void
linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}

/* See linux-nat.h.  */

int
linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
  if (errno != 0)
    {
      memset (siginfo, 0, sizeof (*siginfo));
      return 0;
    }
  return 1;
}

/* See nat/linux-nat.h.  */

ptid_t
current_lwp_ptid (void)
{
  gdb_assert (ptid_lwp_p (inferior_ptid));
  return inferior_ptid;
}

void
_initialize_linux_nat (void)
{
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
			   &debug_linux_namespaces, _("\
Set debugging of GNU/Linux namespaces module."), _("\
Show debugging of GNU/Linux namespaces module."), _("\
Enables printf debugging output."),
			   NULL,
			   NULL,
			   &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);

  lwp_lwpid_htab_create ();
}

/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  sigemptyset (set);

  /* NPTL reserves the first two RT signals, but does not provide any
     way for the debugger to query the signal numbers - fortunately
     they don't change.  */
  sigaddset (set, __SIGRTMIN);
  sigaddset (set, __SIGRTMIN + 1);
}
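
/* Background, for the curious (not relied on by the code above): on
   Linux these are kernel signals 32 and 33, which glibc's NPTL reserves
   for thread cancellation and setxid broadcasts; that is why the
   libc-level SIGRTMIN seen by applications usually starts at 34.  */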