1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
27 #ifdef HAVE_TKILL_SYSCALL
29 #include <sys/syscall.h>
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "nat/linux-ptrace.h"
34 #include "nat/linux-procfs.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
40 #include "inf-child.h"
41 #include "inf-ptrace.h"
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include <sys/stat.h> /* for struct stat */
49 #include <fcntl.h> /* for O_RDONLY */
51 #include "event-loop.h"
52 #include "event-top.h"
54 #include <sys/types.h>
56 #include "xml-support.h"
59 #include "nat/linux-osdata.h"
60 #include "linux-tdep.h"
63 #include "tracepoint.h"
65 #include "target-descriptions.h"
66 #include "filestuff.h"
70 #define SPUFS_MAGIC 0x23c9b64e
73 #ifdef HAVE_PERSONALITY
74 # include <sys/personality.h>
75 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
76 # define ADDR_NO_RANDOMIZE 0x0040000
78 #endif /* HAVE_PERSONALITY */
80 /* This comment documents high-level logic of this file.
82 Waiting for events in sync mode
83 ===============================
85 When waiting for an event in a specific thread, we just use waitpid, passing
86 the specific pid, and not passing WNOHANG.
88 When waiting for an event in all threads, waitpid is not quite good. Prior to
89 version 2.4, Linux can either wait for event in main thread, or in secondary
90 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
91 miss an event. The solution is to use non-blocking waitpid, together with
92 sigsuspend. First, we use non-blocking waitpid to get an event in the main
93 process, if any. Second, we use non-blocking waitpid with the __WCLONED
94 flag to check for events in cloned processes. If nothing is found, we use
95 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
96 happened to a child process -- and SIGCHLD will be delivered both for events
97 in main debugged process and in cloned processes. As soon as we know there's
98 an event, we get back to calling nonblocking waitpid with and without
101 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
102 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
103 blocked, the signal becomes pending and sigsuspend immediately
104 notices it and returns.
106 Waiting for events in async mode
107 ================================
109 In async mode, GDB should always be ready to handle both user input
110 and target events, so neither blocking waitpid nor sigsuspend are
111 viable options. Instead, we should asynchronously notify the GDB main
112 event loop whenever there's an unprocessed event from the target. We
113 detect asynchronous target events by handling SIGCHLD signals. To
114 notify the event loop about target events, the self-pipe trick is used
115 --- a pipe is registered as waitable event source in the event loop,
116 the event loop select/poll's on the read end of this pipe (as well on
117 other event sources, e.g., stdin), and the SIGCHLD handler writes a
118 byte to this pipe. This is more portable than relying on
119 pselect/ppoll, since on kernels that lack those syscalls, libc
120 emulates them with select/poll+sigprocmask, and that is racy
121 (a.k.a. plain broken).
123 Obviously, if we fail to notify the event loop if there's a target
124 event, it's bad. OTOH, if we notify the event loop when there's no
125 event from the target, linux_nat_wait will detect that there's no real
126 event to report, and return event of type TARGET_WAITKIND_IGNORE.
127 This is mostly harmless, but it will waste time and is better avoided.
129 The main design point is that every time GDB is outside linux-nat.c,
130 we have a SIGCHLD handler installed that is called when something
131 happens to the target and notifies the GDB event loop. Whenever GDB
132 core decides to handle the event, and calls into linux-nat.c, we
133 process things as in sync mode, except that the we never block in
136 While processing an event, we may end up momentarily blocked in
137 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
138 return quickly. E.g., in all-stop mode, before reporting to the core
139 that an LWP hit a breakpoint, all LWPs are stopped by sending them
140 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141 Note that this is different from blocking indefinitely waiting for the
142 next event --- here, we're already handling an event.
147 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
148 signal is not entirely significant; we just need a signal to be delivered,
149 so that we can intercept it. SIGSTOP's advantage is that it can not be
150 blocked. A disadvantage is that it is not a real-time signal, so it can only
151 be queued once; we do not keep track of other sources of SIGSTOP.
153 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154 use them, because they have special behavior when the signal is generated -
155 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156 kills the entire thread group.
158 A delivered SIGSTOP would stop the entire thread group, not just the thread we
159 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160 cancel it (by PTRACE_CONT without passing SIGSTOP).
162 We could use a real-time signal instead. This would solve those problems; we
163 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165 generates it, and there are races with trying to find a signal that is not
169 #define O_LARGEFILE 0
172 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174 static struct target_ops *linux_ops;
175 static struct target_ops linux_ops_saved;
177 /* The method to call, if any, when a new thread is attached. */
178 static void (*linux_nat_new_thread) (struct lwp_info *);
180 /* The method to call, if any, when a new fork is attached. */
181 static linux_nat_new_fork_ftype *linux_nat_new_fork;
183 /* The method to call, if any, when a process is no longer
185 static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
187 /* Hook to call prior to resuming a thread. */
188 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
190 /* The method to call, if any, when the siginfo object needs to be
191 converted between the layout returned by ptrace, and the layout in
192 the architecture of the inferior. */
193 static int (*linux_nat_siginfo_fixup) (siginfo_t *,
197 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
198 Called by our to_xfer_partial. */
199 static target_xfer_partial_ftype *super_xfer_partial;
201 /* The saved to_close method, inherited from inf-ptrace.c.
202 Called by our to_close. */
203 static void (*super_close) (struct target_ops *);
205 static unsigned int debug_linux_nat;
/* "show debug linux-nat" command callback: report whether lwp debug
   logging is on.  NOTE(review): this excerpt is missing the return
   type, braces and the trailing `value' argument line; code kept
   byte-identical.  */
207 show_debug_linux_nat (struct ui_file *file, int from_tty,
208 struct cmd_list_element *c, const char *value)
210 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
214 struct simple_pid_list
218 struct simple_pid_list *next;
220 struct simple_pid_list *stopped_pids;
222 /* Async mode support. */
224 /* The read/write ends of the pipe registered as waitable file in the
226 static int linux_nat_event_pipe[2] = { -1, -1 };
228 /* Flush the event pipe. */
/* Drain the self-pipe used for async event notification: read bytes
   until the (non-blocking) read fails with something other than EINTR.
   NOTE(review): declarations and braces are missing from this excerpt;
   code kept byte-identical.  */
231 async_file_flush (void)
238 ret = read (linux_nat_event_pipe[0], &buf, 1);
240 while (ret >= 0 || (ret == -1 && errno == EINTR));
243 /* Put something (anything, doesn't matter what, or how much) in event
244 pipe, so that the select/poll in the event-loop realizes we have
245 something to process. */
/* Writes one byte ("+") to the write end of linux_nat_event_pipe,
   retrying on EINTR.  EAGAIN is deliberately ignored (pipe full means
   the loop will wake anyway).  NOTE(review): braces and intervening
   lines are missing from this excerpt; code kept byte-identical.  */
248 async_file_mark (void)
252 /* It doesn't really matter what the pipe contains, as long we end
253 up with something in it. Might as well flush the previous
259 ret = write (linux_nat_event_pipe[1], "+", 1);
261 while (ret == -1 && errno == EINTR);
263 /* Ignore EAGAIN. If the pipe is full, the event loop will already
264 be awakened anyway. */
267 static int kill_lwp (int lwpid, int signo);
269 static int stop_callback (struct lwp_info *lp, void *data);
271 static void block_child_signals (sigset_t *prev_mask);
272 static void restore_child_signals_mask (sigset_t *prev_mask);
275 static struct lwp_info *add_lwp (ptid_t ptid);
276 static void purge_lwp_list (int pid);
277 static void delete_lwp (ptid_t ptid);
278 static struct lwp_info *find_lwp_pid (ptid_t ptid);
281 /* Trivial list manipulation functions to keep track of a list of
282 new stopped processes. */
/* Prepend a (pid, status) node to *LISTP.  NOTE(review): the lines
   assigning new_pid->pid and *listp are missing from this excerpt.  */
284 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
286 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
289 new_pid->status = status;
290 new_pid->next = *listp;
/* Return nonzero if PID appears in LIST (linear scan; the comparison
   line is missing from this excerpt).  */
295 in_pid_list_p (struct simple_pid_list *list, int pid)
297 struct simple_pid_list *p;
299 for (p = list; p != NULL; p = p->next)
/* Find PID in *LISTP; if found, store its status in *STATUSP, unlink
   and free the node (unlink/free lines missing from this excerpt).  */
306 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
308 struct simple_pid_list **p;
310 for (p = listp; *p != NULL; p = &(*p)->next)
311 if ((*p)->pid == pid)
313 struct simple_pid_list *next = (*p)->next;
315 *statusp = (*p)->status;
323 /* Initialize ptrace warnings and check for supported ptrace
326 ATTACHED should be nonzero iff we attached to the inferior. */
/* Enables extended ptrace event reporting for PID and primes the
   one-time ptrace warning machinery.  */
329 linux_init_ptrace (pid_t pid, int attached)
331 linux_enable_event_reporting (pid, attached);
332 linux_ptrace_init_warnings ();
/* target_ops post-attach hook: we attached, so pass attached=1.  */
336 linux_child_post_attach (struct target_ops *self, int pid)
338 linux_init_ptrace (pid, 1);
/* target_ops post-startup hook: we spawned the inferior, attached=0.  */
342 linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
344 linux_init_ptrace (ptid_get_pid (ptid), 0);
347 /* Return the number of known LWPs in the tgid given by PID. */
/* NOTE(review): the function signature and the counter increment are
   missing from this excerpt — presumably this is the body of a helper
   that walks lwp_list counting entries whose pid matches; confirm
   against the full source.  */
355 for (lp = lwp_list; lp; lp = lp->next)
356 if (ptid_get_pid (lp->ptid) == pid)
362 /* Call delete_lwp with prototype compatible for make_cleanup. */
/* Cleanup trampoline: the void* argument is really a struct lwp_info*.  */
365 delete_lwp_cleanup (void *lp_voidp)
367 struct lwp_info *lp = lp_voidp;
369 delete_lwp (lp->ptid);
372 /* Target hook for follow_fork. On entry inferior_ptid must be the
373 ptid of the followed inferior. At return, inferior_ptid will be
/* NOTE(review): this excerpt of linux_child_follow_fork is missing a
   large number of original lines (braces, if/else conditions such as
   the follow-child vs. detach-child branches, and the vfork handling
   conditions).  Code below is kept byte-identical; comments only.  */
377 linux_child_follow_fork (struct target_ops *ops, int follow_child,
382 struct lwp_info *child_lp = NULL;
383 int status = W_STOPCODE (0);
384 struct cleanup *old_chain;
386 int parent_pid, child_pid;
/* Distinguish vfork from fork: vfork needs extra care because parent
   and child share the address space until the child execs/exits.  */
388 has_vforked = (inferior_thread ()->pending_follow.kind
389 == TARGET_WAITKIND_VFORKED);
390 parent_pid = ptid_get_lwp (inferior_ptid);
392 parent_pid = ptid_get_pid (inferior_ptid);
394 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
397 /* We're already attached to the parent, by default. */
398 old_chain = save_inferior_ptid ();
399 inferior_ptid = ptid_build (child_pid, child_pid, 0);
400 child_lp = add_lwp (inferior_ptid);
401 child_lp->stopped = 1;
402 child_lp->last_resume_kind = resume_stop;
404 /* Detach new forked process? */
407 make_cleanup (delete_lwp_cleanup, child_lp);
409 if (linux_nat_prepare_to_resume != NULL)
410 linux_nat_prepare_to_resume (child_lp);
412 /* When debugging an inferior in an architecture that supports
413 hardware single stepping on a kernel without commit
414 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
415 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
416 set if the parent process had them set.
417 To work around this, single step the child process
418 once before detaching to clear the flags. */
420 if (!gdbarch_software_single_step_p (target_thread_architecture
423 linux_disable_event_reporting (child_pid);
424 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
425 perror_with_name (_("Couldn't do single step"));
426 if (my_waitpid (child_pid, &status, 0) < 0)
427 perror_with_name (_("Couldn't wait vfork process"));
/* Do not forward a signal GDB would not pass to the inferior when
   detaching the not-followed child.  */
430 if (WIFSTOPPED (status))
434 signo = WSTOPSIG (status);
436 && !signal_pass_state (gdb_signal_from_host (signo)))
438 ptrace (PTRACE_DETACH, child_pid, 0, signo);
441 /* Resets value of inferior_ptid to parent ptid. */
442 do_cleanups (old_chain);
446 /* Let the thread_db layer learn about this new process. */
447 check_for_thread_db ();
450 do_cleanups (old_chain);
/* Following the parent after a vfork: we must wait until the child
   releases the shared address space before breakpoints can be
   reinserted in the parent.  */
454 struct lwp_info *parent_lp;
456 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
457 gdb_assert (linux_supports_tracefork () >= 0);
459 if (linux_supports_tracevforkdone ())
462 fprintf_unfiltered (gdb_stdlog,
463 "LCFF: waiting for VFORK_DONE on %d\n",
465 parent_lp->stopped = 1;
467 /* We'll handle the VFORK_DONE event like any other
468 event, in target_wait. */
472 /* We can't insert breakpoints until the child has
473 finished with the shared memory region. We need to
474 wait until that happens. Ideal would be to just
476 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
477 - waitpid (parent_pid, &status, __WALL);
478 However, most architectures can't handle a syscall
479 being traced on the way out if it wasn't traced on
482 We might also think to loop, continuing the child
483 until it exits or gets a SIGTRAP. One problem is
484 that the child might call ptrace with PTRACE_TRACEME.
486 There's no simple and reliable way to figure out when
487 the vforked child will be done with its copy of the
488 shared memory. We could step it out of the syscall,
489 two instructions, let it go, and then single-step the
490 parent once. When we have hardware single-step, this
491 would work; with software single-step it could still
492 be made to work but we'd have to be able to insert
493 single-step breakpoints in the child, and we'd have
494 to insert -just- the single-step breakpoint in the
495 parent. Very awkward.
497 In the end, the best we can do is to make sure it
498 runs for a little while. Hopefully it will be out of
499 range of any breakpoints we reinsert. Usually this
500 is only the single-step breakpoint at vfork's return
504 fprintf_unfiltered (gdb_stdlog,
505 "LCFF: no VFORK_DONE "
506 "support, sleeping a bit\n");
510 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
511 and leave it pending. The next linux_nat_resume call
512 will notice a pending event, and bypasses actually
513 resuming the inferior. */
514 parent_lp->status = 0;
515 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
516 parent_lp->stopped = 1;
518 /* If we're in async mode, need to tell the event loop
519 there's something here to process. */
520 if (target_can_async_p ())
/* Follow-child branch: register the child as a stopped LWP and let
   thread_db discover its threads.  */
527 struct lwp_info *child_lp;
529 child_lp = add_lwp (inferior_ptid);
530 child_lp->stopped = 1;
531 child_lp->last_resume_kind = resume_stop;
533 /* Let the thread_db layer learn about this new process. */
534 check_for_thread_db ();
/* Catchpoint target hooks.  Each insert hook returns 0 (success) when
   the kernel supports the corresponding PTRACE_EVENT, nonzero
   otherwise.  NOTE(review): remove-hook bodies and braces are missing
   from this excerpt; code kept byte-identical.  */
542 linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
544 return !linux_supports_tracefork ();
548 linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
554 linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
556 return !linux_supports_tracefork ();
560 linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
566 linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
568 return !linux_supports_tracefork ();
572 linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
/* Syscall catchpoints: only feasible with PTRACE_O_TRACESYSGOOD.  */
578 linux_child_set_syscall_catchpoint (struct target_ops *self,
579 int pid, int needed, int any_count,
580 int table_size, int *table)
582 if (!linux_supports_tracesysgood ())
585 /* On GNU/Linux, we ignore the arguments. It means that we only
586 enable the syscall catchpoints, but do not disable them.
588 Also, we do not use the `table' information because we do not
589 filter system calls here. We let GDB do the logic for us. */
593 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
594 are processes sharing the same VM space. A multi-threaded process
595 is basically a group of such processes. However, such a grouping
596 is almost entirely a user-space issue; the kernel doesn't enforce
597 such a grouping at all (this might change in the future). In
598 general, we'll rely on the threads library (i.e. the GNU/Linux
599 Threads library) to provide such a grouping.
601 It is perfectly well possible to write a multi-threaded application
602 without the assistance of a threads library, by using the clone
603 system call directly. This module should be able to give some
604 rudimentary support for debugging such applications if developers
605 specify the CLONE_PTRACE flag in the clone system call, and are
606 using the Linux kernel 2.4 or above.
608 Note that there are some peculiarities in GNU/Linux that affect
611 - In general one should specify the __WCLONE flag to waitpid in
612 order to make it report events for any of the cloned processes
613 (and leave it out for the initial process). However, if a cloned
614 process has exited the exit status is only reported if the
615 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
616 we cannot use it since GDB must work on older systems too.
618 - When a traced, cloned process exits and is waited for by the
619 debugger, the kernel reassigns it to the original parent and
620 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
621 library doesn't notice this, which leads to the "zombie problem":
622 When debugged, a multi-threaded process that spawns a lot of
623 threads will run out of processes, even if the threads exit,
624 because the "zombies" stay around. */
626 /* List of known LWPs. */
627 struct lwp_info *lwp_list;
630 /* Original signal mask. */
631 static sigset_t normal_mask;
633 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
634 _initialize_linux_nat. */
635 static sigset_t suspend_mask;
/* Signals to block so that the sigsuspend above works.  */
638 static sigset_t blocked_mask;
640 /* SIGCHLD action. */
641 struct sigaction sigchld_action;
643 /* Block child signals (SIGCHLD and linux threads signals), and store
644 the previous mask in PREV_MASK. */
/* Idempotently adds SIGCHLD to blocked_mask, then blocks the whole
   set, saving the prior mask in *PREV_MASK for later restore.  */
647 block_child_signals (sigset_t *prev_mask)
649 /* Make sure SIGCHLD is blocked. */
650 if (!sigismember (&blocked_mask, SIGCHLD))
651 sigaddset (&blocked_mask, SIGCHLD);
653 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
656 /* Restore child signals mask, previously returned by
657 block_child_signals. */
/* Counterpart of block_child_signals: reinstates *PREV_MASK wholesale.  */
660 restore_child_signals_mask (sigset_t *prev_mask)
662 sigprocmask (SIG_SETMASK, prev_mask, NULL);
665 /* Mask of signals to pass directly to the inferior. */
666 static sigset_t pass_mask;
668 /* Update signals to pass to the inferior. */
/* Rebuild pass_mask from PASS_SIGNALS, an array of NUMSIGS flags
   indexed by gdb_signal number; host signal numbers outside the
   array are not passed.  */
670 linux_nat_pass_signals (struct target_ops *self,
671 int numsigs, unsigned char *pass_signals)
675 sigemptyset (&pass_mask);
677 for (signo = 1; signo < NSIG; signo++)
679 int target_signo = gdb_signal_from_host (signo);
680 if (target_signo < numsigs && pass_signals[target_signo])
681 sigaddset (&pass_mask, signo);
687 /* Prototypes for local functions. */
688 static int stop_wait_callback (struct lwp_info *lp, void *data);
689 static int linux_thread_alive (ptid_t ptid);
690 static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
694 /* Destroy and free LP. */
/* Frees the per-arch private data first; the xfree of LP itself is
   missing from this excerpt.  */
697 lwp_free (struct lwp_info *lp)
699 xfree (lp->arch_private);
703 /* Remove all LWPs belonging to PID from the lwp list. */
/* Walks lwp_list with a trailing pointer so matching nodes can be
   unlinked in place.  NOTE(review): lpnext assignment, head-of-list
   unlink case and the lwp_free call are missing from this excerpt.  */
706 purge_lwp_list (int pid)
708 struct lwp_info *lp, *lpprev, *lpnext;
712 for (lp = lwp_list; lp; lp = lpnext)
716 if (ptid_get_pid (lp->ptid) == pid)
721 lpprev->next = lp->next;
730 /* Add the LWP specified by PTID to the list. PTID is the first LWP
731 in the process. Return a pointer to the structure describing the
734 This differs from add_lwp in that we don't let the arch specific
735 bits know about this new thread. Current clients of this callback
736 take the opportunity to install watchpoints in the new thread, and
737 we shouldn't do that for the first thread. If we're spawning a
738 child ("run"), the thread executes the shell wrapper first, and we
739 shouldn't touch it until it execs the program we want to debug.
740 For "attach", it'd be okay to call the callback, but it's not
741 necessary, because watchpoints can't yet have been inserted into
744 static struct lwp_info *
745 add_initial_lwp (ptid_t ptid)
/* PTID must carry an lwp id; a bare (pid,0,0) ptid is a caller bug.  */
749 gdb_assert (ptid_lwp_p (ptid));
751 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
/* Zero the whole struct so every flag/pointer starts cleared.  */
753 memset (lp, 0, sizeof (struct lwp_info));
755 lp->last_resume_kind = resume_continue;
756 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
767 /* Add the LWP specified by PID to the list. Return a pointer to the
768 structure describing the new LWP. The LWP should already be
771 static struct lwp_info *
772 add_lwp (ptid_t ptid)
776 lp = add_initial_lwp (ptid);
778 /* Let the arch specific bits know about this new thread. Current
779 clients of this callback take the opportunity to install
780 watchpoints in the new thread. We don't do this for the first
781 thread though. See add_initial_lwp. */
782 if (linux_nat_new_thread != NULL)
783 linux_nat_new_thread (lp);
788 /* Remove the LWP specified by PID from the list. */
/* Linear scan with trailing pointer; NOTE(review): the head-of-list
   unlink case and the lwp_free call are missing from this excerpt.  */
791 delete_lwp (ptid_t ptid)
793 struct lwp_info *lp, *lpprev;
797 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
798 if (ptid_equal (lp->ptid, ptid))
805 lpprev->next = lp->next;
812 /* Return a pointer to the structure describing the LWP corresponding
813 to PID. If no corresponding LWP could be found, return NULL. */
815 static struct lwp_info *
816 find_lwp_pid (ptid_t ptid)
/* Accept either a full (pid,lwp) ptid or a bare pid; in the latter
   case the main thread's lwp id equals the pid.  */
821 if (ptid_lwp_p (ptid))
822 lwp = ptid_get_lwp (ptid);
824 lwp = ptid_get_pid (ptid);
826 for (lp = lwp_list; lp; lp = lp->next)
827 if (lwp == ptid_get_lwp (lp->ptid))
833 /* Call CALLBACK with its second argument set to DATA for every LWP in
834 the list. If CALLBACK returns 1 for a particular LWP, return a
835 pointer to the structure describing that LWP immediately.
836 Otherwise return NULL. */
839 iterate_over_lwps (ptid_t filter,
840 int (*callback) (struct lwp_info *, void *),
843 struct lwp_info *lp, *lpnext;
/* lpnext is captured before the callback runs (capture line missing
   from this excerpt) so the callback may delete LP safely.  */
845 for (lp = lwp_list; lp; lp = lpnext)
849 if (ptid_match (lp->ptid, filter))
851 if ((*callback) (lp, data))
859 /* Update our internal state when changing from one checkpoint to
860 another indicated by NEW_PTID. We can only switch single-threaded
861 applications, so we only create one new LWP, and the previous list
865 linux_nat_switch_fork (ptid_t new_ptid)
/* Drop every LWP of the old fork, then create the single new one.  */
869 purge_lwp_list (ptid_get_pid (inferior_ptid));
871 lp = add_lwp (new_ptid);
874 /* This changes the thread's ptid while preserving the gdb thread
875 num. Also changes the inferior pid, while preserving the
877 thread_change_ptid (inferior_ptid, new_ptid);
879 /* We've just told GDB core that the thread changed target id, but,
880 in fact, it really is a different thread, with different register
882 registers_changed ();
885 /* Handle the exit of a single thread LP. */
888 exit_lwp (struct lwp_info *lp)
890 struct thread_info *th = find_thread_ptid (lp->ptid);
/* Announce the exit (if the user asked for thread events), then drop
   both the gdb thread and our lwp bookkeeping for it.  */
894 if (print_thread_events)
895 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
897 delete_thread (lp->ptid);
900 delete_lwp (lp->ptid);
903 /* Wait for the LWP specified by LP, which we have just attached to.
904 Returns a wait status for that LWP, to cache. */
/* NOTE(review): several lines of this function (braces, the *cloned /
   *signalled stores, debug-flag checks) are missing from this excerpt;
   code kept byte-identical.  */
907 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
910 pid_t new_pid, pid = ptid_get_lwp (ptid);
/* If the task was already in (job-control) stop before we attached,
   we must force it into a ptrace stop ourselves.  */
913 if (linux_proc_pid_is_stopped (pid))
916 fprintf_unfiltered (gdb_stdlog,
917 "LNPAW: Attaching to a stopped process\n");
919 /* The process is definitely stopped. It is in a job control
920 stop, unless the kernel predates the TASK_STOPPED /
921 TASK_TRACED distinction, in which case it might be in a
922 ptrace stop. Make sure it is in a ptrace stop; from there we
923 can kill it, signal it, et cetera.
925 First make sure there is a pending SIGSTOP. Since we are
926 already attached, the process can not transition from stopped
927 to running without a PTRACE_CONT; so we know this signal will
928 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
929 probably already in the queue (unless this kernel is old
930 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
931 is not an RT signal, it can only be queued once. */
932 kill_lwp (pid, SIGSTOP);
934 /* Finally, resume the stopped process. This will deliver the SIGSTOP
935 (or a higher priority signal, just like normal PTRACE_ATTACH). */
936 ptrace (PTRACE_CONT, pid, 0, 0);
939 /* Make sure the initial process is stopped. The user-level threads
940 layer might want to poke around in the inferior, and that won't
941 work if things haven't stabilized yet. */
942 new_pid = my_waitpid (pid, &status, 0);
/* ECHILD here means the task is a clone; retry with __WCLONE.  */
943 if (new_pid == -1 && errno == ECHILD)
946 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
948 /* Try again with __WCLONE to check cloned processes. */
949 new_pid = my_waitpid (pid, &status, __WCLONE);
953 gdb_assert (pid == new_pid);
955 if (!WIFSTOPPED (status))
957 /* The pid we tried to attach has apparently just exited. */
959 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
960 pid, status_to_str (status));
964 if (WSTOPSIG (status) != SIGSTOP)
968 fprintf_unfiltered (gdb_stdlog,
969 "LNPAW: Received %s after attaching\n",
970 status_to_str (status));
976 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
977 the new LWP could not be attached, or 1 if we're already auto
978 attached to this thread, but haven't processed the
979 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
980 its existence, without considering it an error. */
/* NOTE(review): many lines of this function (braces, return
   statements, several conditions) are missing from this excerpt;
   code kept byte-identical.  */
983 lin_lwp_attach_lwp (ptid_t ptid)
988 gdb_assert (ptid_lwp_p (ptid));
990 lp = find_lwp_pid (ptid);
991 lwpid = ptid_get_lwp (ptid);
993 /* We assume that we're already attached to any LWP that has an id
994 equal to the overall process id, and to any LWP that is already
995 in our list of LWPs. If we're not seeing exit events from threads
996 and we've had PID wraparound since we last tried to stop all threads,
997 this assumption might be wrong; fortunately, this is very unlikely
999 if (lwpid != ptid_get_pid (ptid) && lp == NULL)
1001 int status, cloned = 0, signalled = 0;
1003 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1005 if (linux_supports_tracefork ())
1007 /* If we haven't stopped all threads when we get here,
1008 we may have seen a thread listed in thread_db's list,
1009 but not processed the PTRACE_EVENT_CLONE yet. If
1010 that's the case, ignore this new thread, and let
1011 normal event handling discover it later. */
1012 if (in_pid_list_p (stopped_pids, lwpid))
1014 /* We've already seen this thread stop, but we
1015 haven't seen the PTRACE_EVENT_CLONE extended
1024 /* See if we've got a stop for this new child
1025 pending. If so, we're already attached. */
1026 gdb_assert (lwpid > 0);
1027 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1028 if (new_pid == -1 && errno == ECHILD)
1029 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1032 if (WIFSTOPPED (status))
1033 add_to_pid_list (&stopped_pids, lwpid, status);
1039 /* If we fail to attach to the thread, issue a warning,
1040 but continue. One way this can happen is if thread
1041 creation is interrupted; as of Linux kernel 2.6.19, a
1042 bug may place threads in the thread list and then fail
1044 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1045 safe_strerror (errno));
1049 if (debug_linux_nat)
1050 fprintf_unfiltered (gdb_stdlog,
1051 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1052 target_pid_to_str (ptid));
1054 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1055 if (!WIFSTOPPED (status))
1058 lp = add_lwp (ptid);
1060 lp->cloned = cloned;
1061 lp->signalled = signalled;
/* A non-SIGSTOP stop must be preserved and reported later.  */
1062 if (WSTOPSIG (status) != SIGSTOP)
1065 lp->status = status;
1068 target_post_attach (ptid_get_lwp (lp->ptid));
1070 if (debug_linux_nat)
1072 fprintf_unfiltered (gdb_stdlog,
1073 "LLAL: waitpid %s received %s\n",
1074 target_pid_to_str (ptid),
1075 status_to_str (status));
1080 /* We assume that the LWP representing the original process is
1081 already stopped. Mark it as stopped in the data structure
1082 that the GNU/linux ptrace layer uses to keep track of
1083 threads. Note that this won't have already been done since
1084 the main thread will have, we assume, been stopped by an
1085 attach from a different layer. */
1087 lp = add_lwp (ptid)
1091 lp->last_resume_kind = resume_stop;
/* to_create_inferior hook: optionally disables address-space
   randomization via personality(2) around the low-level fork/exec,
   restoring the original personality afterwards.  NOTE(review):
   braces and a few lines are missing from this excerpt.  */
1096 linux_nat_create_inferior (struct target_ops *ops,
1097 char *exec_file, char *allargs, char **env,
1100 #ifdef HAVE_PERSONALITY
1101 int personality_orig = 0, personality_set = 0;
1102 #endif /* HAVE_PERSONALITY */
1104 /* The fork_child mechanism is synchronous and calls target_wait, so
1105 we have to mask the async mode. */
1107 #ifdef HAVE_PERSONALITY
1108 if (disable_randomization)
/* personality(0xffffffff) is the documented way to query the current
   personality without changing it.  */
1111 personality_orig = personality (0xffffffff);
1112 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1114 personality_set = 1;
1115 personality (personality_orig | ADDR_NO_RANDOMIZE);
1117 if (errno != 0 || (personality_set
1118 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1119 warning (_("Error disabling address space randomization: %s"),
1120 safe_strerror (errno));
1122 #endif /* HAVE_PERSONALITY */
1124 /* Make sure we report all signals during startup. */
1125 linux_nat_pass_signals (ops, 0, NULL);
/* Delegate the actual fork/exec to the single-threaded target.  */
1127 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1129 #ifdef HAVE_PERSONALITY
1130 if (personality_set)
1133 personality (personality_orig);
1135 warning (_("Error restoring address space randomization: %s"),
1136 safe_strerror (errno));
1138 #endif /* HAVE_PERSONALITY */
1141 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1142 already attached. Returns true if a new LWP is found, false
1146 attach_proc_task_lwp_callback (ptid_t ptid)
1148 struct lwp_info *lp;
1150 /* Ignore LWPs we're already attached to. */
1151 lp = find_lwp_pid (ptid);
1154 int lwpid = ptid_get_lwp (ptid);
1156 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1160 /* Be quiet if we simply raced with the thread exiting.
1161 EPERM is returned if the thread's task still exists, and
1162 is marked as exited or zombie, as well as other
1163 conditions, so in that case, confirm the status in
1164 /proc/PID/status. */
1166 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1168 if (debug_linux_nat)
1170 fprintf_unfiltered (gdb_stdlog,
1171 "Cannot attach to lwp %d: "
1172 "thread is gone (%d: %s)\n",
1173 lwpid, err, safe_strerror (err));
/* Not a benign race: warn the user with the detailed reason.  */
1178 warning (_("Cannot attach to lwp %d: %s\n"),
1180 linux_ptrace_attach_fail_reason_string (ptid,
1186 if (debug_linux_nat)
1187 fprintf_unfiltered (gdb_stdlog,
1188 "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1189 target_pid_to_str (ptid));
1191 lp = add_lwp (ptid);
1194 /* The next time we wait for this LWP we'll see a SIGSTOP as
1195 PTRACE_ATTACH brings it to a halt. */
1198 /* We need to wait for a stop before being able to make the
1199 next ptrace call on this LWP. */
1200 lp->must_set_ptrace_flags = 1;
/* NOTE(review): this region was extracted with interior lines dropped and
   stray source line numbers fused into the text.  Code is left
   byte-identical; only comments have been added.  */
/* target_ops to_attach method: attach to the process given by ARGS and
   then to each of its LWPs found via /proc.  */
1209 linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
1211 struct lwp_info *lp;
1214 volatile struct gdb_exception ex;
1216 /* Make sure we report all signals during attach. */
1217 linux_nat_pass_signals (ops, 0, NULL);
/* Delegate the actual PTRACE_ATTACH to the layer below.  On failure,
   prepend the likely kernel-side reason (from linux_ptrace_attach_fail_reason)
   to the error message before re-throwing.  */
1219 TRY_CATCH (ex, RETURN_MASK_ERROR)
1221 linux_ops->to_attach (ops, args, from_tty);
1225 pid_t pid = parse_pid_to_attach (args);
1226 struct buffer buffer;
1227 char *message, *buffer_s;
1229 message = xstrdup (ex.message);
1230 make_cleanup (xfree, message);
1232 buffer_init (&buffer);
1233 linux_ptrace_attach_fail_reason (pid, &buffer);
1235 buffer_grow_str0 (&buffer, "");
1236 buffer_s = buffer_finish (&buffer);
1237 make_cleanup (xfree, buffer_s);
1239 if (*buffer_s != '\0')
1240 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1242 throw_error (ex.error, "%s", message);
1245 /* The ptrace base target adds the main thread with (pid,0,0)
1246 format. Decorate it with lwp info. */
1247 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1248 ptid_get_pid (inferior_ptid),
1250 thread_change_ptid (inferior_ptid, ptid);
1252 /* Add the initial process as the first LWP to the list. */
1253 lp = add_initial_lwp (ptid);
/* Wait for the initial post-attach stop; anything other than a stop
   means the program went away while we were attaching.  */
1255 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1257 if (!WIFSTOPPED (status))
1259 if (WIFEXITED (status))
1261 int exit_code = WEXITSTATUS (status);
1263 target_terminal_ours ();
1264 target_mourn_inferior ();
1266 error (_("Unable to attach: program exited normally."));
1268 error (_("Unable to attach: program exited with code %d."),
1271 else if (WIFSIGNALED (status))
1273 enum gdb_signal signo;
1275 target_terminal_ours ();
1276 target_mourn_inferior ();
1278 signo = gdb_signal_from_host (WTERMSIG (status));
1279 error (_("Unable to attach: program terminated with signal "
1281 gdb_signal_to_name (signo),
1282 gdb_signal_to_string (signo));
1285 internal_error (__FILE__, __LINE__,
1286 _("unexpected status %d for PID %ld"),
1287 status, (long) ptid_get_lwp (ptid));
1292 /* Save the wait status to report later. */
1294 if (debug_linux_nat)
1295 fprintf_unfiltered (gdb_stdlog,
1296 "LNA: waitpid %ld, saving status %s\n",
1297 (long) ptid_get_pid (lp->ptid), status_to_str (status));
1299 lp->status = status;
1301 /* We must attach to every LWP. If /proc is mounted, use that to
1302 find them now. The inferior may be using raw clone instead of
1303 using pthreads. But even if it is using pthreads, thread_db
1304 walks structures in the inferior's address space to find the list
1305 of threads/LWPs, and those structures may well be corrupted.
1306 Note that once thread_db is loaded, we'll still use it to list
1307 threads and associate pthread info with each LWP. */
1308 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1309 attach_proc_task_lwp_callback);
/* Switch to event-loop-driven (async) operation if supported.  */
1311 if (target_can_async_p ())
1312 target_async (inferior_event_handler, 0);
/* NOTE(review): extracted with interior lines dropped and stray line
   numbers fused into the text; code left byte-identical, comments only
   added.  */
1315 /* Get pending status of LP. */
/* Compute in *STATUS a wait status carrying the signal (if any) that
   should be forwarded to LP, e.g. when detaching.  */
1317 get_pending_status (struct lwp_info *lp, int *status)
1319 enum gdb_signal signo = GDB_SIGNAL_0;
1321 /* If we paused threads momentarily, we may have stored pending
1322 events in lp->status or lp->waitstatus (see stop_wait_callback),
1323 and GDB core hasn't seen any signal for those threads.
1324 Otherwise, the last signal reported to the core is found in the
1325 thread object's stop_signal.
1327 There's a corner case that isn't handled here at present. Only
1328 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1329 stop_signal make sense as a real signal to pass to the inferior.
1330 Some catchpoint related events, like
1331 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1332 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1333 those traps are debug API (ptrace in our case) related and
1334 induced; the inferior wouldn't see them if it wasn't being
1335 traced. Hence, we should never pass them to the inferior, even
1336 when set to pass state. Since this corner case isn't handled by
1337 infrun.c when proceeding with a signal, for consistency, neither
1338 do we handle it here (or elsewhere in the file we check for
1339 signal pass state). Normally SIGTRAP isn't set to pass state, so
1340 this is really a corner case. */
1342 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1343 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1344 else if (lp->status)
1345 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1346 else if (non_stop && !is_executing (lp->ptid))
1348 struct thread_info *tp = find_thread_ptid (lp->ptid);
1350 signo = tp->suspend.stop_signal;
/* All-stop mode: only the thread that reported the last event has a
   meaningful stop_signal.  */
1354 struct target_waitstatus last;
1357 get_last_target_status (&last_ptid, &last);
1359 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
1361 struct thread_info *tp = find_thread_ptid (lp->ptid);
1363 signo = tp->suspend.stop_signal;
/* Turn SIGNO into a stopped wait status, but only if it is a signal
   the user wants passed to the inferior.  */
1369 if (signo == GDB_SIGNAL_0)
1371 if (debug_linux_nat)
1372 fprintf_unfiltered (gdb_stdlog,
1373 "GPT: lwp %s has no pending signal\n",
1374 target_pid_to_str (lp->ptid));
1376 else if (!signal_pass_state (signo))
1378 if (debug_linux_nat)
1379 fprintf_unfiltered (gdb_stdlog,
1380 "GPT: lwp %s had signal %s, "
1381 "but it is in no pass state\n",
1382 target_pid_to_str (lp->ptid),
1383 gdb_signal_to_string (signo));
1387 *status = W_STOPCODE (gdb_signal_to_host (signo));
1389 if (debug_linux_nat)
1390 fprintf_unfiltered (gdb_stdlog,
1391 "GPT: lwp %s has pending signal %s\n",
1392 target_pid_to_str (lp->ptid),
1393 gdb_signal_to_string (signo));
1400 detach_callback (struct lwp_info *lp, void *data)
1402 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1404 if (debug_linux_nat && lp->status)
1405 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1406 strsignal (WSTOPSIG (lp->status)),
1407 target_pid_to_str (lp->ptid));
1409 /* If there is a pending SIGSTOP, get rid of it. */
1412 if (debug_linux_nat)
1413 fprintf_unfiltered (gdb_stdlog,
1414 "DC: Sending SIGCONT to %s\n",
1415 target_pid_to_str (lp->ptid));
1417 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
1421 /* We don't actually detach from the LWP that has an id equal to the
1422 overall process id just yet. */
1423 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
1427 /* Pass on any pending signal for this LWP. */
1428 get_pending_status (lp, &status);
1430 if (linux_nat_prepare_to_resume != NULL)
1431 linux_nat_prepare_to_resume (lp);
1433 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
1434 WSTOPSIG (status)) < 0)
1435 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1436 safe_strerror (errno));
1438 if (debug_linux_nat)
1439 fprintf_unfiltered (gdb_stdlog,
1440 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1441 target_pid_to_str (lp->ptid),
1442 strsignal (WSTOPSIG (status)));
1444 delete_lwp (lp->ptid);
/* NOTE(review): extracted with interior lines dropped and stray line
   numbers fused into the text; code left byte-identical, comments only
   added/fixed.  */
/* target_ops to_detach method: stop every LWP, detach each non-leader
   LWP, then hand the final detach to linux-fork (checkpoint case) or
   the layer below.  */
1451 linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
1455 struct lwp_info *main_lwp;
1457 pid = ptid_get_pid (inferior_ptid);
1459 /* Don't unregister from the event loop, as there may be other
1460 inferiors running. */
1462 /* Stop all threads before detaching. ptrace requires that the
1463 thread is stopped to successfully detach. */
1464 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1465 /* ... and wait until all of them have reported back that
1466 they're no longer running. */
1467 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1469 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1471 /* Only the initial process should be left right now. */
1472 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
1474 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1476 /* Pass on any pending signal for the last LWP. */
1477 if ((args == NULL || *args == '\0')
1478 && get_pending_status (main_lwp, &status) != -1
1479 && WIFSTOPPED (status))
1483 /* Put the signal number in ARGS so that inf_ptrace_detach will
1484 pass it along with PTRACE_DETACH. */
1486 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
1488 if (debug_linux_nat)
1489 fprintf_unfiltered (gdb_stdlog,
1490 "LND: Sending signal %s to %s\n",
1492 target_pid_to_str (main_lwp->ptid));
1495 if (linux_nat_prepare_to_resume != NULL)
1496 linux_nat_prepare_to_resume (main_lwp);
1497 delete_lwp (main_lwp->ptid);
1499 if (forks_exist_p ())
1501 /* Multi-fork case. The current inferior_ptid is being detached
1502 from, but there are other viable forks to debug. Detach from
1503 the current fork, and context-switch to the first
1505 linux_fork_detach (args, from_tty);
/* Normal case: let the ptrace base target do the final detach.  */
1508 linux_ops->to_detach (ops, args, from_tty);
/* NOTE(review): extracted with interior lines dropped and stray line
   numbers fused into the text; code left byte-identical, comments only
   added.  */
/* Resume LP, single-stepping if STEP, delivering SIGNO — unless it is
   a vfork parent, has a pending event, or is not stopped.  */
1514 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1518 struct inferior *inf = find_inferior_ptid (lp->ptid);
/* A vfork parent must stay stopped until the child releases it.  */
1520 if (inf->vfork_child != NULL)
1522 if (debug_linux_nat)
1523 fprintf_unfiltered (gdb_stdlog,
1524 "RC: Not resuming %s (vfork parent)\n",
1525 target_pid_to_str (lp->ptid));
/* Only resume if there is no event pending on this LWP.  */
1527 else if (lp->status == 0
1528 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1530 if (debug_linux_nat)
1531 fprintf_unfiltered (gdb_stdlog,
1532 "RC: Resuming sibling %s, %s, %s\n",
1533 target_pid_to_str (lp->ptid),
1534 (signo != GDB_SIGNAL_0
1535 ? strsignal (gdb_signal_to_host (signo))
1537 step ? "step" : "resume");
1539 if (linux_nat_prepare_to_resume != NULL)
1540 linux_nat_prepare_to_resume (lp);
1541 linux_ops->to_resume (linux_ops,
1542 pid_to_ptid (ptid_get_lwp (lp->ptid)),
/* Invalidate the cached watchpoint-stop info now that LP runs again
   (see save_sigtrap).  */
1546 lp->stopped_by_watchpoint = 0;
1550 if (debug_linux_nat)
1551 fprintf_unfiltered (gdb_stdlog,
1552 "RC: Not resuming sibling %s (has pending)\n",
1553 target_pid_to_str (lp->ptid));
1558 if (debug_linux_nat)
1559 fprintf_unfiltered (gdb_stdlog,
1560 "RC: Not resuming sibling %s (not stopped)\n",
1561 target_pid_to_str (lp->ptid));
1565 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1566 Resume LWP with the last stop signal, if it is in pass state. */
1569 linux_nat_resume_callback (struct lwp_info *lp, void *except)
1571 enum gdb_signal signo = GDB_SIGNAL_0;
1578 struct thread_info *thread;
1580 thread = find_thread_ptid (lp->ptid);
1583 signo = thread->suspend.stop_signal;
1584 thread->suspend.stop_signal = GDB_SIGNAL_0;
1588 resume_lwp (lp, 0, signo);
1593 resume_clear_callback (struct lwp_info *lp, void *data)
1596 lp->last_resume_kind = resume_stop;
1601 resume_set_callback (struct lwp_info *lp, void *data)
1604 lp->last_resume_kind = resume_continue;
/* NOTE(review): extracted with interior lines dropped and stray line
   numbers fused into the text; code left byte-identical, comments only
   added.  */
/* target_ops to_resume method: resume PTID (or all lwps if wildcard),
   stepping and/or delivering SIGNO as requested.  */
1609 linux_nat_resume (struct target_ops *ops,
1610 ptid_t ptid, int step, enum gdb_signal signo)
1612 struct lwp_info *lp;
1615 if (debug_linux_nat)
1616 fprintf_unfiltered (gdb_stdlog,
1617 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1618 step ? "step" : "resume",
1619 target_pid_to_str (ptid),
1620 (signo != GDB_SIGNAL_0
1621 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1622 target_pid_to_str (inferior_ptid));
1624 /* A specific PTID means `step only this process id'. */
1625 resume_many = (ptid_equal (minus_one_ptid, ptid)
1626 || ptid_is_pid (ptid));
1628 /* Mark the lwps we're resuming as resumed. */
1629 iterate_over_lwps (ptid, resume_set_callback, NULL);
1631 /* See if it's the current inferior that should be handled
1634 lp = find_lwp_pid (inferior_ptid);
1636 lp = find_lwp_pid (ptid);
1637 gdb_assert (lp != NULL);
1639 /* Remember if we're stepping. */
1641 lp->last_resume_kind = step ? resume_step : resume_continue;
1643 /* If we have a pending wait status for this thread, there is no
1644 point in resuming the process. But first make sure that
1645 linux_nat_wait won't preemptively handle the event - we
1646 should never take this short-circuit if we are going to
1647 leave LP running, since we have skipped resuming all the
1648 other threads. This bit of code needs to be synchronized
1649 with linux_nat_wait. */
/* A pending stop for a pass-state signal is not short-circuited:
   the signal is re-delivered on resume instead.  */
1651 if (lp->status && WIFSTOPPED (lp->status))
1654 && WSTOPSIG (lp->status)
1655 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1657 if (debug_linux_nat)
1658 fprintf_unfiltered (gdb_stdlog,
1659 "LLR: Not short circuiting for ignored "
1660 "status 0x%x\n", lp->status);
1662 /* FIXME: What should we do if we are supposed to continue
1663 this thread with a signal? */
1664 gdb_assert (signo == GDB_SIGNAL_0);
1665 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1670 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1672 /* FIXME: What should we do if we are supposed to continue
1673 this thread with a signal? */
1674 gdb_assert (signo == GDB_SIGNAL_0);
1676 if (debug_linux_nat)
1677 fprintf_unfiltered (gdb_stdlog,
1678 "LLR: Short circuiting for status 0x%x\n",
1681 if (target_can_async_p ())
1683 target_async (inferior_event_handler, 0);
1684 /* Tell the event loop we have something to process. */
/* Resume all sibling lwps except LP itself (the event thread).  */
1691 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
1693 /* Convert to something the lower layer understands. */
1694 ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
1696 if (linux_nat_prepare_to_resume != NULL)
1697 linux_nat_prepare_to_resume (lp);
1698 linux_ops->to_resume (linux_ops, ptid, step, signo);
1699 lp->stopped_by_watchpoint = 0;
1702 if (debug_linux_nat)
1703 fprintf_unfiltered (gdb_stdlog,
1704 "LLR: %s %s, %s (resume event thread)\n",
1705 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1706 target_pid_to_str (ptid),
1707 (signo != GDB_SIGNAL_0
1708 ? strsignal (gdb_signal_to_host (signo)) : "0"));
1710 if (target_can_async_p ())
1711 target_async (inferior_event_handler, 0);
/* Send signal SIGNO to LWP LWPID.  Returns 0 on success, -1 (with
   errno set) on failure, mirroring kill(2)/tkill(2).  */

static int
kill_lwp (int lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	/* ENOSYS means the kernel lacks tkill; remember that and fall
	   back to kill below, permanently.  */
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
/* NOTE(review): extracted with interior lines dropped and stray line
   numbers fused into the text; code left byte-identical, comments only
   added.  */
1742 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1743 event, check if the core is interested in it: if not, ignore the
1744 event, and keep waiting; otherwise, we need to toggle the LWP's
1745 syscall entry/exit status, since the ptrace event itself doesn't
1746 indicate it, and report the trap to higher layers. */
/* Returns nonzero if the event was consumed and the caller should keep
   waiting; STOPPING is set while stop_wait_callback is collecting
   SIGSTOPs.  */
1749 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1751 struct target_waitstatus *ourstatus = &lp->waitstatus;
1752 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1753 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1757 /* If we're stopping threads, there's a SIGSTOP pending, which
1758 makes it so that the LWP reports an immediate syscall return,
1759 followed by the SIGSTOP. Skip seeing that "return" using
1760 PTRACE_CONT directly, and let stop_wait_callback collect the
1761 SIGSTOP. Later when the thread is resumed, a new syscall
1762 entry event. If we didn't do this (and returned 0), we'd
1763 leave a syscall entry pending, and our caller, by using
1764 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1765 itself. Later, when the user re-resumes this LWP, we'd see
1766 another syscall entry event and we'd mistake it for a return.
1768 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1769 (leaving immediately with LWP->signalled set, without issuing
1770 a PTRACE_CONT), it would still be problematic to leave this
1771 syscall enter pending, as later when the thread is resumed,
1772 it would then see the same syscall exit mentioned above,
1773 followed by the delayed SIGSTOP, while the syscall didn't
1774 actually get to execute. It seems it would be even more
1775 confusing to the user. */
1777 if (debug_linux_nat)
1778 fprintf_unfiltered (gdb_stdlog,
1779 "LHST: ignoring syscall %d "
1780 "for LWP %ld (stopping threads), "
1781 "resuming with PTRACE_CONT for SIGSTOP\n",
1783 ptid_get_lwp (lp->ptid));
1785 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1786 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
1791 if (catch_syscall_enabled ())
1793 /* Always update the entry/return state, even if this particular
1794 syscall isn't interesting to the core now. In async mode,
1795 the user could install a new catchpoint for this syscall
1796 between syscall enter/return, and we'll need to know to
1797 report a syscall return if that happens. */
1798 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1799 ? TARGET_WAITKIND_SYSCALL_RETURN
1800 : TARGET_WAITKIND_SYSCALL_ENTRY);
1802 if (catching_syscall_number (syscall_number))
1804 /* Alright, an event to report. */
1805 ourstatus->kind = lp->syscall_state;
1806 ourstatus->value.syscall_number = syscall_number;
1808 if (debug_linux_nat)
1809 fprintf_unfiltered (gdb_stdlog,
1810 "LHST: stopping for %s of syscall %d"
1813 == TARGET_WAITKIND_SYSCALL_ENTRY
1814 ? "entry" : "return",
1816 ptid_get_lwp (lp->ptid));
1820 if (debug_linux_nat)
1821 fprintf_unfiltered (gdb_stdlog,
1822 "LHST: ignoring %s of syscall %d "
1824 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1825 ? "entry" : "return",
1827 ptid_get_lwp (lp->ptid));
1831 /* If we had been syscall tracing, and hence used PT_SYSCALL
1832 before on this LWP, it could happen that the user removes all
1833 syscall catchpoints before we get to process this event.
1834 There are two noteworthy issues here:
1836 - When stopped at a syscall entry event, resuming with
1837 PT_STEP still resumes executing the syscall and reports a
1840 - Only PT_SYSCALL catches syscall enters. If we last
1841 single-stepped this thread, then this event can't be a
1842 syscall enter. If we last single-stepped this thread, this
1843 has to be a syscall exit.
1845 The points above mean that the next resume, be it PT_STEP or
1846 PT_CONTINUE, can not trigger a syscall trace event. */
1847 if (debug_linux_nat)
1848 fprintf_unfiltered (gdb_stdlog,
1849 "LHST: caught syscall event "
1850 "with no syscall catchpoints."
1851 " %d for LWP %ld, ignoring\n",
1853 ptid_get_lwp (lp->ptid));
1854 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1857 /* The core isn't interested in this event. For efficiency, avoid
1858 stopping all threads only to have the core resume them all again.
1859 Since we're not stopping threads, if we're still syscall tracing
1860 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1861 subsequent syscall. Simply resume using the inf-ptrace layer,
1862 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1864 /* Note that gdbarch_get_syscall_number may access registers, hence
1866 registers_changed ();
1867 if (linux_nat_prepare_to_resume != NULL)
1868 linux_nat_prepare_to_resume (lp);
1869 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
1870 lp->step, GDB_SIGNAL_0);
/* NOTE(review): extracted with interior lines dropped and stray line
   numbers fused into the text; code left byte-identical, comments only
   added.  */
1875 /* Handle a GNU/Linux extended wait response. If we see a clone
1876 event, we need to add the new LWP to our list (and not report the
1877 trap to higher layers). This function returns non-zero if the
1878 event should be ignored and we should wait again. If STOPPING is
1879 true, the new LWP remains stopped, otherwise it is continued. */
1882 linux_handle_extended_wait (struct lwp_info *lp, int status,
1885 int pid = ptid_get_lwp (lp->ptid);
1886 struct target_waitstatus *ourstatus = &lp->waitstatus;
1887 int event = linux_ptrace_get_extended_event (status);
/* fork/vfork/clone all deliver the new child's id via
   PTRACE_GETEVENTMSG and require collecting the child's initial
   SIGSTOP before we can touch it.  */
1889 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1890 || event == PTRACE_EVENT_CLONE)
1892 unsigned long new_pid;
1895 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1897 /* If we haven't already seen the new PID stop, wait for it now. */
1898 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1900 /* The new child has a pending SIGSTOP. We can't affect it until it
1901 hits the SIGSTOP, but we're already attached. */
1902 ret = my_waitpid (new_pid, &status,
1903 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1905 perror_with_name (_("waiting for new child"));
1906 else if (ret != new_pid)
1907 internal_error (__FILE__, __LINE__,
1908 _("wait returned unexpected PID %d"), ret);
1909 else if (!WIFSTOPPED (status))
1910 internal_error (__FILE__, __LINE__,
1911 _("wait returned unexpected status 0x%x"), status);
1914 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1916 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1918 /* The arch-specific native code may need to know about new
1919 forks even if those end up never mapped to an
1921 if (linux_nat_new_fork != NULL)
1922 linux_nat_new_fork (lp, new_pid);
1925 if (event == PTRACE_EVENT_FORK
1926 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
1928 /* Handle checkpointing by linux-fork.c here as a special
1929 case. We don't want the follow-fork-mode or 'catch fork'
1930 to interfere with this. */
1932 /* This won't actually modify the breakpoint list, but will
1933 physically remove the breakpoints from the child. */
1934 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
1936 /* Retain child fork in ptrace (stopped) state. */
1937 if (!find_fork_pid (new_pid))
1940 /* Report as spurious, so that infrun doesn't want to follow
1941 this fork. We're actually doing an infcall in
1943 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
1945 /* Report the stop to the core. */
1949 if (event == PTRACE_EVENT_FORK)
1950 ourstatus->kind = TARGET_WAITKIND_FORKED;
1951 else if (event == PTRACE_EVENT_VFORK)
1952 ourstatus->kind = TARGET_WAITKIND_VFORKED;
/* Clone case: record the new LWP ourselves instead of reporting the
   event to the core.  */
1955 struct lwp_info *new_lp;
1957 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1959 if (debug_linux_nat)
1960 fprintf_unfiltered (gdb_stdlog,
1961 "LHEW: Got clone event "
1962 "from LWP %d, new child is LWP %ld\n",
1965 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
1967 new_lp->stopped = 1;
1969 if (WSTOPSIG (status) != SIGSTOP)
1971 /* This can happen if someone starts sending signals to
1972 the new thread before it gets a chance to run, which
1973 have a lower number than SIGSTOP (e.g. SIGUSR1).
1974 This is an unlikely case, and harder to handle for
1975 fork / vfork than for clone, so we do not try - but
1976 we handle it for clone events here. We'll send
1977 the other signal on to the thread below. */
1979 new_lp->signalled = 1;
1983 struct thread_info *tp;
1985 /* When we stop for an event in some other thread, and
1986 pull the thread list just as this thread has cloned,
1987 we'll have seen the new thread in the thread_db list
1988 before handling the CLONE event (glibc's
1989 pthread_create adds the new thread to the thread list
1990 before clone'ing, and has the kernel fill in the
1991 thread's tid on the clone call with
1992 CLONE_PARENT_SETTID). If that happened, and the core
1993 had requested the new thread to stop, we'll have
1994 killed it with SIGSTOP. But since SIGSTOP is not an
1995 RT signal, it can only be queued once. We need to be
1996 careful to not resume the LWP if we wanted it to
1997 stop. In that case, we'll leave the SIGSTOP pending.
1998 It will later be reported as GDB_SIGNAL_0. */
1999 tp = find_thread_ptid (new_lp->ptid);
2000 if (tp != NULL && tp->stop_requested)
2001 new_lp->last_resume_kind = resume_stop;
2008 /* Add the new thread to GDB's lists as soon as possible
2011 1) the frontend doesn't have to wait for a stop to
2014 2) we tag it with the correct running state. */
2016 /* If the thread_db layer is active, let it know about
2017 this new thread, and add it to GDB's list. */
2018 if (!thread_db_attach_lwp (new_lp->ptid))
2020 /* We're not using thread_db. Add it to GDB's
2022 target_post_attach (ptid_get_lwp (new_lp->ptid));
2023 add_thread (new_lp->ptid);
2028 set_running (new_lp->ptid, 1);
2029 set_executing (new_lp->ptid, 1);
2030 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2032 new_lp->last_resume_kind = resume_continue;
2038 /* We created NEW_LP so it cannot yet contain STATUS. */
2039 gdb_assert (new_lp->status == 0);
2041 /* Save the wait status to report later. */
2042 if (debug_linux_nat)
2043 fprintf_unfiltered (gdb_stdlog,
2044 "LHEW: waitpid of new LWP %ld, "
2045 "saving status %s\n",
2046 (long) ptid_get_lwp (new_lp->ptid),
2047 status_to_str (status));
2048 new_lp->status = status;
2051 /* Note the need to use the low target ops to resume, to
2052 handle resuming with PT_SYSCALL if we have syscall
2056 new_lp->resumed = 1;
2060 gdb_assert (new_lp->last_resume_kind == resume_continue);
2061 if (debug_linux_nat)
2062 fprintf_unfiltered (gdb_stdlog,
2063 "LHEW: resuming new LWP %ld\n",
2064 ptid_get_lwp (new_lp->ptid));
2065 if (linux_nat_prepare_to_resume != NULL)
2066 linux_nat_prepare_to_resume (new_lp);
2067 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2069 new_lp->stopped = 0;
/* Clone handled entirely here; restart the parent and tell the caller
   to keep waiting.  */
2073 if (debug_linux_nat)
2074 fprintf_unfiltered (gdb_stdlog,
2075 "LHEW: resuming parent LWP %d\n", pid);
2076 if (linux_nat_prepare_to_resume != NULL)
2077 linux_nat_prepare_to_resume (lp);
2078 linux_ops->to_resume (linux_ops,
2079 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2088 if (event == PTRACE_EVENT_EXEC)
2090 if (debug_linux_nat)
2091 fprintf_unfiltered (gdb_stdlog,
2092 "LHEW: Got exec event from LWP %ld\n",
2093 ptid_get_lwp (lp->ptid));
2095 ourstatus->kind = TARGET_WAITKIND_EXECD;
2096 ourstatus->value.execd_pathname
2097 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
2102 if (event == PTRACE_EVENT_VFORK_DONE)
2104 if (current_inferior ()->waiting_for_vfork_done)
2106 if (debug_linux_nat)
2107 fprintf_unfiltered (gdb_stdlog,
2108 "LHEW: Got expected PTRACE_EVENT_"
2109 "VFORK_DONE from LWP %ld: stopping\n",
2110 ptid_get_lwp (lp->ptid));
2112 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2116 if (debug_linux_nat)
2117 fprintf_unfiltered (gdb_stdlog,
2118 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2119 "from LWP %ld: resuming\n",
2120 ptid_get_lwp (lp->ptid));
2121 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2125 internal_error (__FILE__, __LINE__,
2126 _("unknown ptrace event %d"), event);
/* NOTE(review): extracted with interior lines dropped and stray line
   numbers fused into the text; code left byte-identical, comments only
   added.  */
2129 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2133 wait_lwp (struct lwp_info *lp)
2137 int thread_dead = 0;
2140 gdb_assert (!lp->stopped);
2141 gdb_assert (lp->status == 0);
2143 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2144 block_child_signals (&prev_mask);
2148 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2149 was right and we should just call sigsuspend. */
/* Try both waitpid flavors: non-clone children first, then clone
   (__WCLONE) children.  */
2151 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
2152 if (pid == -1 && errno == ECHILD)
2153 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
2154 if (pid == -1 && errno == ECHILD)
2156 /* The thread has previously exited. We need to delete it
2157 now because, for some vendor 2.4 kernels with NPTL
2158 support backported, there won't be an exit event unless
2159 it is the main thread. 2.6 kernels will report an exit
2160 event for each thread that exits, as expected. */
2162 if (debug_linux_nat)
2163 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2164 target_pid_to_str (lp->ptid));
2169 /* Bugs 10970, 12702.
2170 Thread group leader may have exited in which case we'll lock up in
2171 waitpid if there are other threads, even if they are all zombies too.
2172 Basically, we're not supposed to use waitpid this way.
2173 __WCLONE is not applicable for the leader so we can't use that.
2174 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2175 process; it gets ESRCH both for the zombie and for running processes.
2177 As a workaround, check if we're waiting for the thread group leader and
2178 if it's a zombie, and avoid calling waitpid if it is.
2180 This is racy, what if the tgl becomes a zombie right after we check?
2181 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2182 waiting waitpid but linux_proc_pid_is_zombie is safe this way. */
2184 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2185 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
2188 if (debug_linux_nat)
2189 fprintf_unfiltered (gdb_stdlog,
2190 "WL: Thread group leader %s vanished.\n",
2191 target_pid_to_str (lp->ptid));
2195 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2196 get invoked despite our caller had them intentionally blocked by
2197 block_child_signals. This is sensitive only to the loop of
2198 linux_nat_wait_1 and there if we get called my_waitpid gets called
2199 again before it gets to sigsuspend so we can safely let the handlers
2200 get executed here. */
2202 if (debug_linux_nat)
2203 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
2204 sigsuspend (&suspend_mask);
2207 restore_child_signals_mask (&prev_mask);
2211 gdb_assert (pid == ptid_get_lwp (lp->ptid));
2213 if (debug_linux_nat)
2215 fprintf_unfiltered (gdb_stdlog,
2216 "WL: waitpid %s received %s\n",
2217 target_pid_to_str (lp->ptid),
2218 status_to_str (status));
2221 /* Check if the thread has exited. */
2222 if (WIFEXITED (status) || WIFSIGNALED (status))
2225 if (debug_linux_nat)
2226 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2227 target_pid_to_str (lp->ptid));
2237 gdb_assert (WIFSTOPPED (status));
/* First stop after attach: enable the extended ptrace event options
   now that the LWP is stopped.  */
2240 if (lp->must_set_ptrace_flags)
2242 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2244 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2245 lp->must_set_ptrace_flags = 0;
2248 /* Handle GNU/Linux's syscall SIGTRAPs. */
2249 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2251 /* No longer need the sysgood bit. The ptrace event ends up
2252 recorded in lp->waitstatus if we care for it. We can carry
2253 on handling the event like a regular SIGTRAP from here
2255 status = W_STOPCODE (SIGTRAP);
2256 if (linux_handle_syscall_trap (lp, 1))
2257 return wait_lwp (lp);
2260 /* Handle GNU/Linux's extended waitstatus for trace events. */
2261 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2262 && linux_is_extended_waitstatus (status))
2264 if (debug_linux_nat)
2265 fprintf_unfiltered (gdb_stdlog,
2266 "WL: Handling extended status 0x%06x\n",
2268 if (linux_handle_extended_wait (lp, status, 1))
2269 return wait_lwp (lp);
2275 /* Send a SIGSTOP to LP. */
2278 stop_callback (struct lwp_info *lp, void *data)
2280 if (!lp->stopped && !lp->signalled)
2284 if (debug_linux_nat)
2286 fprintf_unfiltered (gdb_stdlog,
2287 "SC: kill %s **<SIGSTOP>**\n",
2288 target_pid_to_str (lp->ptid));
2291 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2292 if (debug_linux_nat)
2294 fprintf_unfiltered (gdb_stdlog,
2295 "SC: lwp kill %d %s\n",
2297 errno ? safe_strerror (errno) : "ERRNO-OK");
2301 gdb_assert (lp->status == 0);
2307 /* Request a stop on LWP. */
2310 linux_stop_lwp (struct lwp_info *lwp)
2312 stop_callback (lwp, NULL);
2315 /* Return non-zero if LWP PID has a pending SIGINT. */
2318 linux_nat_has_pending_sigint (int pid)
2320 sigset_t pending, blocked, ignored;
2322 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2324 if (sigismember (&pending, SIGINT)
2325 && !sigismember (&ignored, SIGINT))
2331 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2334 set_ignore_sigint (struct lwp_info *lp, void *data)
2336 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2337 flag to consume the next one. */
2338 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2339 && WSTOPSIG (lp->status) == SIGINT)
2342 lp->ignore_sigint = 1;
2347 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2348 This function is called after we know the LWP has stopped; if the LWP
2349 stopped before the expected SIGINT was delivered, then it will never have
2350 arrived. Also, if the signal was delivered to a shared queue and consumed
2351 by a different thread, it will never be delivered to this LWP. */
2354 maybe_clear_ignore_sigint (struct lwp_info *lp)
2356 if (!lp->ignore_sigint)
2359 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2361 if (debug_linux_nat)
2362 fprintf_unfiltered (gdb_stdlog,
2363 "MCIS: Clearing bogus flag for %s\n",
2364 target_pid_to_str (lp->ptid));
2365 lp->ignore_sigint = 0;
2369 /* Fetch the possible triggered data watchpoint info and store it in
2372 On some archs, like x86, that use debug registers to set
2373 watchpoints, it's possible that the way to know which watched
2374 address trapped, is to check the register that is used to select
2375 which address to watch. Problem is, between setting the watchpoint
2376 and reading back which data address trapped, the user may change
2377 the set of watchpoints, and, as a consequence, GDB changes the
2378 debug registers in the inferior. To avoid reading back a stale
2379 stopped-data-address when that happens, we cache in LP the fact
2380 that a watchpoint trapped, and the corresponding data address, as
2381 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2382 registers meanwhile, we have the cached data we can rely on. */
2385 save_sigtrap (struct lwp_info *lp)
2387 struct cleanup *old_chain;
2389 if (linux_ops->to_stopped_by_watchpoint == NULL)
2391 lp->stopped_by_watchpoint = 0;
2395 old_chain = save_inferior_ptid ();
2396 inferior_ptid = lp->ptid;
2398 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint (linux_ops);
2400 if (lp->stopped_by_watchpoint)
2402 if (linux_ops->to_stopped_data_address != NULL)
2403 lp->stopped_data_address_p =
2404 linux_ops->to_stopped_data_address (¤t_target,
2405 &lp->stopped_data_address);
2407 lp->stopped_data_address_p = 0;
2410 do_cleanups (old_chain);
2413 /* See save_sigtrap. */
/* target_ops hook: report whether the LWP selected by inferior_ptid
   stopped because of a watchpoint.  Returns the value cached by
   save_sigtrap at stop time, not a fresh query of the debug
   registers — see the rationale above save_sigtrap.  */
2416 linux_nat_stopped_by_watchpoint (struct target_ops *ops)
2418 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2420 gdb_assert (lp != NULL);
2422 return lp->stopped_by_watchpoint;
/* target_ops hook: if the current LWP's stop was a watchpoint hit
   with a known data address, store that cached address in *ADDR_P
   and return non-zero.  Note *ADDR_P is written unconditionally; the
   return value says whether it is meaningful.  */
2426 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2428 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2430 gdb_assert (lp != NULL);
2432 *addr_p = lp->stopped_data_address;
2434 return lp->stopped_data_address_p;
2437 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
/* Default recognizer for "SIGTRAP-like" wait statuses: a plain
   stop on SIGTRAP.  Installed as the initial value of
   linux_nat_status_is_event below.  */
2440 sigtrap_is_event (int status)
2442 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2445 /* SIGTRAP-like events recognizer. */
/* File-scope hook so an arch layer can widen what counts as a
   breakpoint-style event (see linux_nat_set_status_is_event).  */
2447 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2449 /* Check for SIGTRAP-like events in LP. */
/* True iff LP's pending event is a SIGTRAP-like stop.  The
   waitstatus.kind guard filters out extended ptrace events and
   pending exits, which are recorded in lp->waitstatus rather than
   lp->status.  */
2452 linux_nat_lp_status_is_event (struct lwp_info *lp)
2454 /* We check for lp->waitstatus in addition to lp->status, because we can
2455 have pending process exits recorded in lp->status
2456 and W_EXITCODE(0,0) == 0. We should probably have an additional
2457 lp->status_p flag. */
2459 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2460 && linux_nat_status_is_event (lp->status));
2463 /* Set alternative SIGTRAP-like events recognizer. If
2464 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
/* Registration hook for arch-specific code: replaces the default
   sigtrap_is_event recognizer.  T is unused here; it exists to match
   the registration-function convention of this file.  */
2468 linux_nat_set_status_is_event (struct target_ops *t,
2469 int (*status_is_event) (int status))
2471 linux_nat_status_is_event = status_is_event;
2474 /* Wait until LP is stopped. */
/* iterate_over_lwps callback (DATA unused): block in wait_lwp until
   LP reports a stop, swallowing the SIGSTOP we sent ourselves and
   any SIGINT we were told to ignore.  Any other signal's status is
   stashed in lp->status as a pending event for later reporting.  */
2477 stop_wait_callback (struct lwp_info *lp, void *data)
2479 struct inferior *inf = find_inferior_ptid (lp->ptid);
2481 /* If this is a vfork parent, bail out, it is not going to report
2482 any SIGSTOP until the vfork is done with. */
2483 if (inf->vfork_child != NULL)
2490 status = wait_lwp (lp);
/* A SIGINT we asked to ignore (see set_ignore_sigint): discard it by
   continuing the LWP without forwarding the signal, then recurse to
   keep waiting for the real stop.  */
2494 if (lp->ignore_sigint && WIFSTOPPED (status)
2495 && WSTOPSIG (status) == SIGINT)
2497 lp->ignore_sigint = 0;
2500 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2502 if (debug_linux_nat)
2503 fprintf_unfiltered (gdb_stdlog,
2504 "PTRACE_CONT %s, 0, 0 (%s) "
2505 "(discarding SIGINT)\n",
2506 target_pid_to_str (lp->ptid),
2507 errno ? safe_strerror (errno) : "OK");
2509 return stop_wait_callback (lp, NULL);
2512 maybe_clear_ignore_sigint (lp);
2514 if (WSTOPSIG (status) != SIGSTOP)
2516 /* The thread was stopped with a signal other than SIGSTOP. */
2520 if (debug_linux_nat)
2521 fprintf_unfiltered (gdb_stdlog,
2522 "SWC: Pending event %s in %s\n",
2523 status_to_str ((int) status),
2524 target_pid_to_str (lp->ptid));
2526 /* Save the sigtrap event. */
/* Leave the non-SIGSTOP status pending in lp->status; the caller's
   event loop will pick it up and report it to the core.  */
2527 lp->status = status;
2528 gdb_assert (lp->signalled);
2532 /* We caught the SIGSTOP that we intended to catch, so
2533 there's no SIGSTOP pending. */
2535 if (debug_linux_nat)
2536 fprintf_unfiltered (gdb_stdlog,
2537 "SWC: Delayed SIGSTOP caught for %s.\n",
2538 target_pid_to_str (lp->ptid));
2540 /* Reset SIGNALLED only after the stop_wait_callback call
2541 above as it does gdb_assert on SIGNALLED. */
2549 /* Return non-zero if LP has a wait status pending. */
/* iterate_over_lwps predicate (DATA unused).  A pending event lives
   either in lp->waitstatus (extended ptrace events, process exits)
   or in lp->status (ordinary signal stops, where 0 means "none").  */
2552 status_callback (struct lwp_info *lp, void *data)
2554 /* Only report a pending wait status if we pretend that this has
2555 indeed been resumed. */
2559 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2561 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2562 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2563 0', so a clean process exit can not be stored pending in
2564 lp->status, it is indistinguishable from
2565 no-pending-status. */
2569 if (lp->status != 0)
2575 /* Return non-zero if LP isn't stopped. */
/* iterate_over_lwps predicate (DATA unused): an LWP counts as
   "running" if it has not stopped yet, or (per the clause below) if
   it is stopped but carries a pending event still to be consumed.  */
2578 running_callback (struct lwp_info *lp, void *data)
2580 return (!lp->stopped
2581 || ((lp->status != 0
2582 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2586 /* Count the LWP's that have had events. */
/* iterate_over_lwps callback; DATA is an int* accumulator owned by
   the caller (select_event_lwp).  Only resumed LWPs with a pending
   SIGTRAP-like event are counted.  */
2589 count_events_callback (struct lwp_info *lp, void *data)
2593 gdb_assert (count != NULL);
2595 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2596 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2602 /* Select the Nth LWP that has had a SIGTRAP event. */
/* NOTE(review): the comment above appears misplaced in this excerpt;
   this predicate matches an LWP whose last resume was a single-step
   (that LWP then gets reporting priority in select_event_lwp).  */
2605 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2607 if (lp->last_resume_kind == resume_step
2614 /* Select the Nth LWP that has had a SIGTRAP event. */
2617 select_event_lwp_callback (struct lwp_info *lp, void *data)
2619 int *selector = data;
2621 gdb_assert (selector != NULL);
2623 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2624 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2625 if ((*selector)-- == 0)
/* Undo a breakpoint hit in LP that we are not going to report now:
   rewind the PC to the breakpoint address so the trap re-fires when
   the LWP is eventually resumed.  Returns non-zero when a breakpoint
   was indeed found at the adjusted PC (the SIGTRAP can then be
   discarded by the caller).  */
2632 cancel_breakpoint (struct lwp_info *lp)
2634 /* Arrange for a breakpoint to be hit again later. We don't keep
2635 the SIGTRAP status and don't forward the SIGTRAP signal to the
2636 LWP. We will handle the current event, eventually we will resume
2637 this LWP, and this breakpoint will trap again.
2639 If we do not do this, then we run the risk that the user will
2640 delete or disable the breakpoint, but the LWP will have already
2643 struct regcache *regcache = get_thread_regcache (lp->ptid);
2644 struct gdbarch *gdbarch = get_regcache_arch (regcache);
/* On archs where a breakpoint trap reports PC past the breakpoint
   instruction, subtract the decr_pc adjustment before probing.  */
2647 pc = regcache_read_pc (regcache) - target_decr_pc_after_break (gdbarch);
2648 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2650 if (debug_linux_nat)
2651 fprintf_unfiltered (gdb_stdlog,
2652 "CB: Push back breakpoint for %s\n",
2653 target_pid_to_str (lp->ptid));
2655 /* Back up the PC if necessary. */
2656 if (target_decr_pc_after_break (gdbarch))
2657 regcache_write_pc (regcache, pc);
/* iterate_over_lwps callback; DATA is the event LWP chosen for
   reporting.  For every OTHER LWP whose pending SIGTRAP is a GDB
   breakpoint hit, push the breakpoint back (cancel_breakpoint) and
   drop the SIGTRAP so it is not reported stale later.  */
2665 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2667 struct lwp_info *event_lp = data;
2669 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2673 /* If a LWP other than the LWP that we're reporting an event for has
2674 hit a GDB breakpoint (as opposed to some random trap signal),
2675 then just arrange for it to hit it again later. We don't keep
2676 the SIGTRAP status and don't forward the SIGTRAP signal to the
2677 LWP. We will handle the current event, eventually we will resume
2678 all LWPs, and this one will get its breakpoint trap again.
2680 If we do not do this, then we run the risk that the user will
2681 delete or disable the breakpoint, but the LWP will have already
2684 if (linux_nat_lp_status_is_event (lp)
2685 && cancel_breakpoint (lp))
2686 /* Throw away the SIGTRAP. */
2692 /* Select one LWP out of those that have events pending. */
/* On entry, *ORIG_LP/*STATUS describe the event waitpid happened to
   return first.  To avoid starving other threads, re-pick the event
   LWP: single-stepped LWPs win outright; otherwise choose uniformly
   at random among resumed LWPs with pending SIGTRAP events, and swap
   the chosen LWP's status into *ORIG_LP/*STATUS.  */
2695 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2698 int random_selector;
2699 struct lwp_info *event_lp;
2701 /* Record the wait status for the original LWP. */
/* Park *STATUS into the original LWP first, so it is preserved as a
   pending event if another LWP ends up being selected.  */
2702 (*orig_lp)->status = *status;
2704 /* Give preference to any LWP that is being single-stepped. */
2705 event_lp = iterate_over_lwps (filter,
2706 select_singlestep_lwp_callback, NULL);
2707 if (event_lp != NULL)
2709 if (debug_linux_nat)
2710 fprintf_unfiltered (gdb_stdlog,
2711 "SEL: Select single-step %s\n",
2712 target_pid_to_str (event_lp->ptid));
2716 /* No single-stepping LWP. Select one at random, out of those
2717 which have had SIGTRAP events. */
2719 /* First see how many SIGTRAP events we have. */
2720 iterate_over_lwps (filter, count_events_callback, &num_events);
2722 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
/* Classic scaled-rand idiom: maps rand() to [0, num_events).  */
2723 random_selector = (int)
2724 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2726 if (debug_linux_nat && num_events > 1)
2727 fprintf_unfiltered (gdb_stdlog,
2728 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2729 num_events, random_selector);
2731 event_lp = iterate_over_lwps (filter,
2732 select_event_lwp_callback,
2736 if (event_lp != NULL)
2738 /* Switch the event LWP. */
2739 *orig_lp = event_lp;
2740 *status = event_lp->status;
2743 /* Flush the wait status for the event LWP. */
/* *ORIG_LP now aliases the selected LWP; clear its stored status so
   the same event is not seen again as pending.  */
2744 (*orig_lp)->status = 0;
2747 /* Return non-zero if LP has been resumed. */
/* iterate_over_lwps predicate (DATA unused); presumably tests
   lp->resumed — the body line is missing from this excerpt.  */
2750 resumed_callback (struct lwp_info *lp, void *data)
2755 /* Stop an active thread, verify it still exists, then resume it. If
2756 the thread ends up with a pending status, then it is not resumed,
2757 and *DATA (really a pointer to int), is set. */
2760 stop_and_resume_callback (struct lwp_info *lp, void *data)
2762 int *new_pending_p = data;
/* Save the ptid: LP may be deleted by wait processing below, so we
   re-look it up afterwards instead of trusting the pointer.  */
2766 ptid_t ptid = lp->ptid;
2768 stop_callback (lp, NULL);
2769 stop_wait_callback (lp, NULL);
2771 /* Resume if the lwp still exists, and the core wanted it
2773 lp = find_lwp_pid (ptid);
2776 if (lp->last_resume_kind == resume_stop
2779 /* The core wanted the LWP to stop. Even if it stopped
2780 cleanly (with SIGSTOP), leave the event pending. */
2781 if (debug_linux_nat)
2782 fprintf_unfiltered (gdb_stdlog,
2783 "SARC: core wanted LWP %ld stopped "
2784 "(leaving SIGSTOP pending)\n",
2785 ptid_get_lwp (lp->ptid));
/* Synthesize a SIGSTOP stop status so the core sees the stop it
   asked for.  */
2786 lp->status = W_STOPCODE (SIGSTOP);
2789 if (lp->status == 0)
2791 if (debug_linux_nat)
2792 fprintf_unfiltered (gdb_stdlog,
2793 "SARC: re-resuming LWP %ld\n",
2794 ptid_get_lwp (lp->ptid));
2795 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
2799 if (debug_linux_nat)
2800 fprintf_unfiltered (gdb_stdlog,
2801 "SARC: not re-resuming LWP %ld "
2803 ptid_get_lwp (lp->ptid));
2812 /* Check if we should go on and pass this event to common code.
2813 Return the affected lwp if we are, or NULL otherwise. If we stop
2814 all lwps temporarily, we may end up with new pending events in some
2815 other lwp. In that case set *NEW_PENDING_P to true. */
2817 static struct lwp_info *
2818 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
2820 struct lwp_info *lp;
2821 int event = linux_ptrace_get_extended_event (status);
2825 lp = find_lwp_pid (pid_to_ptid (lwpid));
2827 /* Check for stop events reported by a process we didn't already
2828 know about - anything not already in our LWP list.
2830 If we're expecting to receive stopped processes after
2831 fork, vfork, and clone events, then we'll just add the
2832 new one to our list and go back to waiting for the event
2833 to be reported - the stopped process might be returned
2834 from waitpid before or after the event is.
2836 But note the case of a non-leader thread exec'ing after the
2837 leader having exited, and gone from our lists. The non-leader
2838 thread changes its tid to the tgid. */
2840 if (WIFSTOPPED (status) && lp == NULL
2841 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
2843 /* A multi-thread exec after we had seen the leader exiting. */
2844 if (debug_linux_nat)
2845 fprintf_unfiltered (gdb_stdlog,
2846 "LLW: Re-adding thread group leader LWP %d.\n",
2849 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
2852 add_thread (lp->ptid);
/* Unknown stopped LWP without an exec event: remember the status; a
   fork/vfork/clone event for it is expected to arrive later.  */
2855 if (WIFSTOPPED (status) && !lp)
2857 add_to_pid_list (&stopped_pids, lwpid, status);
2861 /* Make sure we don't report an event for the exit of an LWP not in
2862 our list, i.e. not part of the current process. This can happen
2863 if we detach from a program we originally forked and then it
2865 if (!WIFSTOPPED (status) && !lp)
2868 /* This LWP is stopped now. (And if dead, this prevents it from
2869 ever being continued.) */
/* Lazily enable ptrace event reporting on the first stop we see.  */
2872 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2874 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2876 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2877 lp->must_set_ptrace_flags = 0;
2880 /* Handle GNU/Linux's syscall SIGTRAPs. */
2881 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2883 /* No longer need the sysgood bit. The ptrace event ends up
2884 recorded in lp->waitstatus if we care for it. We can carry
2885 on handling the event like a regular SIGTRAP from here
2887 status = W_STOPCODE (SIGTRAP);
2888 if (linux_handle_syscall_trap (lp, 0))
2892 /* Handle GNU/Linux's extended waitstatus for trace events. */
2893 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2894 && linux_is_extended_waitstatus (status))
2896 if (debug_linux_nat)
2897 fprintf_unfiltered (gdb_stdlog,
2898 "LLW: Handling extended status 0x%06x\n",
2900 if (linux_handle_extended_wait (lp, status, 0))
2904 if (linux_nat_status_is_event (status))
2907 /* Check if the thread has exited. */
2908 if ((WIFEXITED (status) || WIFSIGNALED (status))
2909 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
2911 /* If this is the main thread, we must stop all threads and verify
2912 if they are still alive. This is because in the nptl thread model
2913 on Linux 2.4, there is no signal issued for exiting LWPs
2914 other than the main thread. We only get the main thread exit
2915 signal once all child threads have already exited. If we
2916 stop all the threads and use the stop_wait_callback to check
2917 if they have exited we can determine whether this signal
2918 should be ignored or whether it means the end of the debugged
2919 application, regardless of which threading model is being
/* pid == lwp identifies the thread-group leader.  */
2921 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2923 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
2924 stop_and_resume_callback, new_pending_p);
2927 if (debug_linux_nat)
2928 fprintf_unfiltered (gdb_stdlog,
2929 "LLW: %s exited.\n",
2930 target_pid_to_str (lp->ptid));
2932 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
2934 /* If there is at least one more LWP, then the exit signal
2935 was not the end of the debugged application and should be
2942 /* Check if the current LWP has previously exited. In the nptl
2943 thread model, LWPs other than the main thread do not issue
2944 signals when they exit so we must check whenever the thread has
2945 stopped. A similar check is made in stop_wait_callback(). */
2946 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
2948 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
2950 if (debug_linux_nat)
2951 fprintf_unfiltered (gdb_stdlog,
2952 "LLW: %s exited.\n",
2953 target_pid_to_str (lp->ptid));
2957 /* Make sure there is at least one thread running. */
2958 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
2960 /* Discard the event. */
2964 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2965 an attempt to stop an LWP. */
2967 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2969 if (debug_linux_nat)
2970 fprintf_unfiltered (gdb_stdlog,
2971 "LLW: Delayed SIGSTOP caught for %s.\n",
2972 target_pid_to_str (lp->ptid))
2976 if (lp->last_resume_kind != resume_stop)
2978 /* This is a delayed SIGSTOP. */
2980 registers_changed ();
2982 if (linux_nat_prepare_to_resume != NULL)
2983 linux_nat_prepare_to_resume (lp);
2984 linux_ops->to_resume (linux_ops,
2985 pid_to_ptid (ptid_get_lwp (lp->ptid)),
2986 lp->step, GDB_SIGNAL_0);
2987 if (debug_linux_nat)
2988 fprintf_unfiltered (gdb_stdlog,
2989 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2991 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2992 target_pid_to_str (lp->ptid));
2995 gdb_assert (lp->resumed);
2997 /* Discard the event. */
3002 /* Make sure we don't report a SIGINT that we have already displayed
3003 for another thread. */
3004 if (lp->ignore_sigint
3005 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3007 if (debug_linux_nat)
3008 fprintf_unfiltered (gdb_stdlog,
3009 "LLW: Delayed SIGINT caught for %s.\n",
3010 target_pid_to_str (lp->ptid));
3012 /* This is a delayed SIGINT. */
3013 lp->ignore_sigint = 0;
3015 registers_changed ();
3016 if (linux_nat_prepare_to_resume != NULL)
3017 linux_nat_prepare_to_resume (lp);
3018 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
3019 lp->step, GDB_SIGNAL_0);
3020 if (debug_linux_nat)
3021 fprintf_unfiltered (gdb_stdlog,
3022 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3024 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3025 target_pid_to_str (lp->ptid));
3028 gdb_assert (lp->resumed);
3030 /* Discard the event. */
3034 /* An interesting event. */
/* Everything uninteresting has been filtered out above; record the
   status and return LP so the caller reports it to the core.  */
3036 lp->status = status;
3040 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3041 their exits until all other threads in the group have exited. */
/* Walks all inferiors; for each, if the thread-group leader LWP is a
   zombie while other threads still exist, delete it from our lists
   (exit_lwp) — waitpid cannot reap it yet, and leaving it would wedge
   the wait loop.  */
3044 check_zombie_leaders (void)
3046 struct inferior *inf;
3050 struct lwp_info *leader_lp;
3055 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3056 if (leader_lp != NULL
3057 /* Check if there are other threads in the group, as we may
3058 have raced with the inferior simply exiting. */
3059 && num_lwps (inf->pid) > 1
3060 && linux_proc_pid_is_zombie (inf->pid))
3062 if (debug_linux_nat)
3063 fprintf_unfiltered (gdb_stdlog,
3064 "CZL: Thread group leader %d zombie "
3065 "(it exited, or another thread execd).\n",
3068 /* A leader zombie can mean one of two things:
3070 - It exited, and there's an exit status pending
3071 available, or only the leader exited (not the whole
3072 program). In the latter case, we can't waitpid the
3073 leader's exit status until all other threads are gone.
3075 - There are 3 or more threads in the group, and a thread
3076 other than the leader exec'd. On an exec, the Linux
3077 kernel destroys all other threads (except the execing
3078 one) in the thread group, and resets the execing thread's
3079 tid to the tgid. No exit notification is sent for the
3080 execing thread -- from the ptracer's perspective, it
3081 appears as though the execing thread just vanishes.
3082 Until we reap all other threads except the leader and the
3083 execing thread, the leader will be zombie, and the
3084 execing thread will be in `D (disc sleep)'. As soon as
3085 all other threads are reaped, the execing thread changes
3086 it's tid to the tgid, and the previous (zombie) leader
3087 vanishes, giving place to the "new" leader. We could try
3088 distinguishing the exit and exec cases, by waiting once
3089 more, and seeing if something comes out, but it doesn't
3090 sound useful. The previous leader _does_ go away, and
3091 we'll re-add the new one once we see the exec event
3092 (which is just the same as what would happen if the
3093 previous leader did exit voluntarily before some other
3096 if (debug_linux_nat)
3097 fprintf_unfiltered (gdb_stdlog,
3098 "CZL: Thread group leader %d vanished.\n",
3100 exit_lwp (leader_lp);
/* Core of the target's wait method: collect one interesting event
   from the LWPs matching PTID, fill *OURSTATUS, and return the ptid
   of the event thread.  Loops over waitpid, filters events through
   linux_nat_filter_event, and in all-stop mode stops every LWP and
   re-selects the event thread before reporting.  */
3106 linux_nat_wait_1 (struct target_ops *ops,
3107 ptid_t ptid, struct target_waitstatus *ourstatus,
3111 enum resume_kind last_resume_kind;
3112 struct lwp_info *lp;
3115 if (debug_linux_nat)
3116 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3118 /* The first time we get here after starting a new inferior, we may
3119 not have added it to the LWP list yet - this is the earliest
3120 moment at which we know its PID. */
3121 if (ptid_is_pid (inferior_ptid))
3123 /* Upgrade the main thread's ptid. */
3124 thread_change_ptid (inferior_ptid,
3125 ptid_build (ptid_get_pid (inferior_ptid),
3126 ptid_get_pid (inferior_ptid), 0));
3128 lp = add_initial_lwp (inferior_ptid);
3132 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3133 block_child_signals (&prev_mask);
3139 /* First check if there is a LWP with a wait status pending. */
3140 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3142 /* Any LWP in the PTID group that's been resumed will do. */
3143 lp = iterate_over_lwps (ptid, status_callback, NULL);
3146 if (debug_linux_nat && lp->status)
3147 fprintf_unfiltered (gdb_stdlog,
3148 "LLW: Using pending wait status %s for %s.\n",
3149 status_to_str (lp->status),
3150 target_pid_to_str (lp->ptid));
3153 else if (ptid_lwp_p (ptid))
3155 if (debug_linux_nat)
3156 fprintf_unfiltered (gdb_stdlog,
3157 "LLW: Waiting for specific LWP %s.\n",
3158 target_pid_to_str (ptid));
3160 /* We have a specific LWP to check. */
3161 lp = find_lwp_pid (ptid);
3164 if (debug_linux_nat && lp->status)
3165 fprintf_unfiltered (gdb_stdlog,
3166 "LLW: Using pending wait status %s for %s.\n",
3167 status_to_str (lp->status),
3168 target_pid_to_str (lp->ptid));
3170 /* We check for lp->waitstatus in addition to lp->status,
3171 because we can have pending process exits recorded in
3172 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3173 an additional lp->status_p flag. */
3174 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3178 if (!target_can_async_p ())
3180 /* Causes SIGINT to be passed on to the attached process. */
3184 /* But if we don't find a pending event, we'll have to wait. */
3190 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3193 - If the thread group leader exits while other threads in the
3194 thread group still exist, waitpid(TGID, ...) hangs. That
3195 waitpid won't return an exit status until the other threads
3196 in the group are reapped.
3198 - When a non-leader thread execs, that thread just vanishes
3199 without reporting an exit (so we'd hang if we waited for it
3200 explicitly in that case). The exec event is reported to
/* Try clone children first (__WCLONE), then regular children; a
   single blocking wait can't cover both classes at once here.  */
3204 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3205 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3206 lwpid = my_waitpid (-1, &status, WNOHANG);
3208 if (debug_linux_nat)
3209 fprintf_unfiltered (gdb_stdlog,
3210 "LNW: waitpid(-1, ...) returned %d, %s\n",
3211 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3215 /* If this is true, then we paused LWPs momentarily, and may
3216 now have pending events to handle. */
3219 if (debug_linux_nat)
3221 fprintf_unfiltered (gdb_stdlog,
3222 "LLW: waitpid %ld received %s\n",
3223 (long) lwpid, status_to_str (status));
3226 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3228 /* STATUS is now no longer valid, use LP->STATUS instead. */
/* Event belongs to an LWP outside the requested PTID set: leave it
   pending on that LWP rather than reporting it now.  */
3231 if (lp && !ptid_match (lp->ptid, ptid))
3233 gdb_assert (lp->resumed);
3235 if (debug_linux_nat)
3236 fprintf_unfiltered (gdb_stdlog,
3237 "LWP %ld got an event %06x, "
3238 "leaving pending.\n",
3239 ptid_get_lwp (lp->ptid), lp->status);
3241 if (WIFSTOPPED (lp->status))
3243 if (WSTOPSIG (lp->status) != SIGSTOP)
3245 /* Cancel breakpoint hits. The breakpoint may
3246 be removed before we fetch events from this
3247 process to report to the core. It is best
3248 not to assume the moribund breakpoints
3249 heuristic always handles these cases --- it
3250 could be too many events go through to the
3251 core before this one is handled. All-stop
3252 always cancels breakpoint hits in all
3255 && linux_nat_lp_status_is_event (lp)
3256 && cancel_breakpoint (lp))
3258 /* Throw away the SIGTRAP. */
3261 if (debug_linux_nat)
3262 fprintf_unfiltered (gdb_stdlog,
3263 "LLW: LWP %ld hit a "
3265 "waiting for another "
3268 ptid_get_lwp (lp->ptid));
3274 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3276 if (debug_linux_nat)
3277 fprintf_unfiltered (gdb_stdlog,
3278 "Process %ld exited while stopping "
3280 ptid_get_lwp (lp->ptid));
3282 /* This was the last lwp in the process. Since
3283 events are serialized to GDB core, and we can't
3284 report this one right now, but GDB core and the
3285 other target layers will want to be notified
3286 about the exit code/signal, leave the status
3287 pending for the next time we're able to report
3290 /* Dead LWP's aren't expected to reported a pending
3294 /* Store the pending event in the waitstatus as
3295 well, because W_EXITCODE(0,0) == 0. */
3296 store_waitstatus (&lp->waitstatus, lp->status);
3305 /* Some LWP now has a pending event. Go all the way
3306 back to check it. */
3312 /* We got an event to report to the core. */
3316 /* Retry until nothing comes out of waitpid. A single
3317 SIGCHLD can indicate more than one child stopped. */
3321 /* Check for zombie thread group leaders. Those can't be reaped
3322 until all other threads in the thread group are. */
3323 check_zombie_leaders ();
3325 /* If there are no resumed children left, bail. We'd be stuck
3326 forever in the sigsuspend call below otherwise. */
3327 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3329 if (debug_linux_nat)
3330 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3332 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3334 if (!target_can_async_p ())
3335 clear_sigint_trap ();
3337 restore_child_signals_mask (&prev_mask);
3338 return minus_one_ptid;
3341 /* No interesting event to report to the core. */
3343 if (target_options & TARGET_WNOHANG)
3345 if (debug_linux_nat)
3346 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3348 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3349 restore_child_signals_mask (&prev_mask);
3350 return minus_one_ptid;
3353 /* We shouldn't end up here unless we want to try again. */
3354 gdb_assert (lp == NULL);
3356 /* Block until we get an event reported with SIGCHLD. */
3357 if (debug_linux_nat)
3358 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
/* Atomically unblocks SIGCHLD and waits; pairs with
   block_child_signals above to avoid a lost-wakeup race.  */
3359 sigsuspend (&suspend_mask);
3362 if (!target_can_async_p ())
3363 clear_sigint_trap ();
3367 status = lp->status;
3370 /* Don't report signals that GDB isn't interested in, such as
3371 signals that are neither printed nor stopped upon. Stopping all
3372 threads can be a bit time-consuming so if we want decent
3373 performance with heavily multi-threaded programs, especially when
3374 they're using a high frequency timer, we'd better avoid it if we
3377 if (WIFSTOPPED (status))
3379 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3381 /* When using hardware single-step, we need to report every signal.
3382 Otherwise, signals in pass_mask may be short-circuited. */
3384 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3386 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3387 here? It is not clear we should. GDB may not expect
3388 other threads to run. On the other hand, not resuming
3389 newly attached threads may cause an unwanted delay in
3390 getting them running. */
3391 registers_changed ();
3392 if (linux_nat_prepare_to_resume != NULL)
3393 linux_nat_prepare_to_resume (lp);
3394 linux_ops->to_resume (linux_ops,
3395 pid_to_ptid (ptid_get_lwp (lp->ptid)),
3397 if (debug_linux_nat)
3398 fprintf_unfiltered (gdb_stdlog,
3399 "LLW: %s %s, %s (preempt 'handle')\n",
3401 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3402 target_pid_to_str (lp->ptid),
3403 (signo != GDB_SIGNAL_0
3404 ? strsignal (gdb_signal_to_host (signo))
3412 /* Only do the below in all-stop, as we currently use SIGINT
3413 to implement target_stop (see linux_nat_stop) in
3415 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3417 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3418 forwarded to the entire process group, that is, all LWPs
3419 will receive it - unless they're using CLONE_THREAD to
3420 share signals. Since we only want to report it once, we
3421 mark it as ignored for all LWPs except this one. */
3422 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3423 set_ignore_sigint, NULL);
3424 lp->ignore_sigint = 0;
3427 maybe_clear_ignore_sigint (lp);
3431 /* This LWP is stopped now. */
3434 if (debug_linux_nat)
3435 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3436 status_to_str (status), target_pid_to_str (lp->ptid));
3440 /* Now stop all other LWP's ... */
3441 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3443 /* ... and wait until all of them have reported back that
3444 they're no longer running. */
3445 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3447 /* If we're not waiting for a specific LWP, choose an event LWP
3448 from among those that have had events. Giving equal priority
3449 to all LWPs that have had events helps prevent
3451 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3452 select_event_lwp (ptid, &lp, &status);
3454 /* Now that we've selected our final event LWP, cancel any
3455 breakpoints in other LWPs that have hit a GDB breakpoint.
3456 See the comment in cancel_breakpoints_callback to find out
3458 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3460 /* We'll need this to determine whether to report a SIGSTOP as
3461 TARGET_WAITKIND_0. Need to take a copy because
3462 resume_clear_callback clears it. */
3463 last_resume_kind = lp->last_resume_kind;
3465 /* In all-stop, from the core's perspective, all LWPs are now
3466 stopped until a new resume action is sent over. */
3467 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3472 last_resume_kind = lp->last_resume_kind;
3473 resume_clear_callback (lp, NULL);
3476 if (linux_nat_status_is_event (status))
3478 if (debug_linux_nat)
3479 fprintf_unfiltered (gdb_stdlog,
3480 "LLW: trap ptid is %s.\n",
3481 target_pid_to_str (lp->ptid));
/* Prefer an extended waitstatus (fork/exec/etc.) recorded on LP;
   otherwise translate the raw wait STATUS.  */
3484 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3486 *ourstatus = lp->waitstatus;
3487 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3490 store_waitstatus (ourstatus, status);
3492 if (debug_linux_nat)
3493 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3495 restore_child_signals_mask (&prev_mask);
3497 if (last_resume_kind == resume_stop
3498 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3499 && WSTOPSIG (status) == SIGSTOP)
3501 /* A thread that has been requested to stop by GDB with
3502 target_stop, and it stopped cleanly, so report as SIG0. The
3503 use of SIGSTOP is an implementation detail. */
3504 ourstatus->value.sig = GDB_SIGNAL_0;
3507 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3508 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3511 lp->core = linux_common_core_of_thread (lp->ptid);
3516 /* Resume LWPs that are currently stopped without any pending status
3517 to report, but are resumed from the core's perspective. */
/* iterate_over_lwps callback; DATA is a ptid_t* naming the set of
   LWPs the caller is about to wait on.  */
3520 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3522 ptid_t *wait_ptid_p = data;
3527 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3529 struct regcache *regcache = get_thread_regcache (lp->ptid);
3530 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3531 CORE_ADDR pc = regcache_read_pc (regcache);
3533 gdb_assert (is_executing (lp->ptid));
3535 /* Don't bother if there's a breakpoint at PC that we'd hit
3536 immediately, and we're not waiting for this LWP. */
3537 if (!ptid_match (lp->ptid, *wait_ptid_p))
3539 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3543 if (debug_linux_nat)
3544 fprintf_unfiltered (gdb_stdlog,
3545 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3546 target_pid_to_str (lp->ptid),
3547 paddress (gdbarch, pc),
3550 registers_changed ();
3551 if (linux_nat_prepare_to_resume != NULL)
3552 linux_nat_prepare_to_resume (lp);
3553 linux_ops->to_resume (linux_ops, pid_to_ptid (ptid_get_lwp (lp->ptid)),
3554 lp->step, GDB_SIGNAL_0)
/* Re-resumed with no signal: any cached watchpoint-stop info is
   stale now.  */
3556 lp->stopped_by_watchpoint = 0;
/* The target's to_wait method proper: thin wrapper around
   linux_nat_wait_1 that first flushes the async notification pipe,
   re-resumes stopped-but-logically-resumed LWPs, and afterwards
   re-arms async event notification when appropriate.  */
3563 linux_nat_wait (struct target_ops *ops,
3564 ptid_t ptid, struct target_waitstatus *ourstatus,
3569 if (debug_linux_nat)
3571 char *options_string;
3573 options_string = target_options_to_string (target_options);
3574 fprintf_unfiltered (gdb_stdlog,
3575 "linux_nat_wait: [%s], [%s]\n",
3576 target_pid_to_str (ptid),
3578 xfree (options_string);
3581 /* Flush the async file first. */
3582 if (target_can_async_p ())
3583 async_file_flush ();
3585 /* Resume LWPs that are currently stopped without any pending status
3586 to report, but are resumed from the core's perspective. LWPs get
3587 in this state if we find them stopping at a time we're not
3588 interested in reporting the event (target_wait on a
3589 specific_process, for example, see linux_nat_wait_1), and
3590 meanwhile the event became uninteresting. Don't bother resuming
3591 LWPs we're not going to wait for if they'd stop immediately. */
3593 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3595 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3597 /* If we requested any event, and something came out, assume there
3598 may be more. If we requested a specific lwp or process, also
3599 assume there may be more. */
3600 if (target_can_async_p ()
3601 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3602 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3603 || !ptid_equal (ptid, minus_one_ptid)))
3606 /* Get ready for the next event. */
3607 if (target_can_async_p ())
3608 target_async (inferior_event_handler, 0);
/* iterate_over_lwps callback: forcibly kill LWP LP with SIGKILL and
   PTRACE_KILL.  DATA is unused.  */
3614 kill_callback (struct lwp_info *lp, void *data)
3616 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3619 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
3620 if (debug_linux_nat)
3622 int save_errno = errno;
3624 fprintf_unfiltered (gdb_stdlog,
3625 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3626 target_pid_to_str (lp->ptid),
3627 save_errno ? safe_strerror (save_errno) : "OK");
3630 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3633 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
3634 if (debug_linux_nat)
3636 int save_errno = errno;
3638 fprintf_unfiltered (gdb_stdlog,
3639 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3640 target_pid_to_str (lp->ptid),
3641 save_errno ? safe_strerror (save_errno) : "OK");
/* iterate_over_lwps callback: reap LWP LP after kill_callback,
   re-killing it if the kernel reports it again.  DATA is unused.  */
3648 kill_wait_callback (struct lwp_info *lp, void *data)
3652 /* We must make sure that there are no pending events (delayed
3653 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3654 program doesn't interfere with any following debugging session. */
3656 /* For cloned processes we must check both with __WCLONE and
3657 without, since the exit status of a cloned process isn't reported
3663 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
3664 if (pid != (pid_t) -1)
3666 if (debug_linux_nat)
3667 fprintf_unfiltered (gdb_stdlog,
3668 "KWC: wait %s received unknown.\n",
3669 target_pid_to_str (lp->ptid));
3670 /* The Linux kernel sometimes fails to kill a thread
3671 completely after PTRACE_KILL; that goes from the stop
3672 point in do_fork out to the one in
3673 get_signal_to_deliver and waits again. So kill it
3675 kill_callback (lp, NULL);
3678 while (pid == ptid_get_lwp (lp->ptid));
3680 gdb_assert (pid == -1 && errno == ECHILD);
/* Second pass: same dance without __WCLONE, for non-cloned LWPs.  */
3685 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
3686 if (pid != (pid_t) -1)
3688 if (debug_linux_nat)
3689 fprintf_unfiltered (gdb_stdlog,
3690 "KWC: wait %s received unk.\n",
3691 target_pid_to_str (lp->ptid));
3692 /* See the call to kill_callback above. */
3693 kill_callback (lp, NULL);
3696 while (pid == ptid_get_lwp (lp->ptid));
3698 gdb_assert (pid == -1 && errno == ECHILD);
/* target_ops to_kill implementation: kill every LWP of the current
   inferior (and any unfollowed fork child), then mourn.  */
3703 linux_nat_kill (struct target_ops *ops)
3705 struct target_waitstatus last;
3709 /* If we're stopped while forking and we haven't followed yet,
3710 kill the other task. We need to do this first because the
3711 parent will be sleeping if this is a vfork. */
3713 get_last_target_status (&last_ptid, &last);
3715 if (last.kind == TARGET_WAITKIND_FORKED
3716 || last.kind == TARGET_WAITKIND_VFORKED)
3718 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
3721 /* Let the arch-specific native code know this process is
3723 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
3726 if (forks_exist_p ())
3727 linux_fork_killall ();
3730 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3732 /* Stop all threads before killing them, since ptrace requires
3733 that the thread is stopped to successfully PTRACE_KILL. */
3734 iterate_over_lwps (ptid, stop_callback, NULL);
3735 /* ... and wait until all of them have reported back that
3736 they're no longer running. */
3737 iterate_over_lwps (ptid, stop_wait_callback, NULL);
3739 /* Kill all LWP's ... */
3740 iterate_over_lwps (ptid, kill_callback, NULL);
3742 /* ... and wait until we've flushed all events. */
3743 iterate_over_lwps (ptid, kill_wait_callback, NULL);
3746 target_mourn_inferior ();
/* target_ops to_mourn_inferior implementation: drop our LWP records
   for the exited inferior and delegate to the lower layer (or switch
   to another fork if any remain).  */
3750 linux_nat_mourn_inferior (struct target_ops *ops)
3752 int pid = ptid_get_pid (inferior_ptid);
3754 purge_lwp_list (pid);
3756 if (! forks_exist_p ())
3757 /* Normal case, no other forks available. */
3758 linux_ops->to_mourn_inferior (ops);
3760 /* Multi-fork case. The current inferior_ptid has exited, but
3761 there are other viable forks to debug. Delete the exiting
3762 one and context-switch to the first available. */
3763 linux_fork_mourn_inferior ();
3765 /* Let the arch-specific native code know this process is gone. */
3766 linux_nat_forget_process (pid);
3769 /* Convert a native/host siginfo object, into/from the siginfo in the
3770 layout of the inferiors' architecture. */
/* Callers pass DIRECTION == 0 when converting the host (ptrace) object
   into the inferior layout, and nonzero for the reverse — see the two
   siginfo_fixup calls in linux_xfer_siginfo.  */
3773 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3777 if (linux_nat_siginfo_fixup != NULL)
3778 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3780 /* If there was no callback, or the callback didn't do anything,
3781 then just do a straight memcpy. */
3785 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3787 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
/* Transfer the inferior's siginfo object (TARGET_OBJECT_SIGNAL_INFO)
   via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO, converting between host and
   inferior layouts with siginfo_fixup.  */
3791 static enum target_xfer_status
3792 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3793 const char *annex, gdb_byte *readbuf,
3794 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3795 ULONGEST *xfered_len)
3799 gdb_byte inf_siginfo[sizeof (siginfo_t)];
3801 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3802 gdb_assert (readbuf || writebuf);
/* Prefer the LWP id when inferior_ptid carries one.  */
3804 pid = ptid_get_lwp (inferior_ptid);
3806 pid = ptid_get_pid (inferior_ptid);
3808 if (offset > sizeof (siginfo))
3809 return TARGET_XFER_E_IO;
3812 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3814 return TARGET_XFER_E_IO;
3816 /* When GDB is built as a 64-bit application, ptrace writes into
3817 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3818 inferior with a 64-bit GDB should look the same as debugging it
3819 with a 32-bit GDB, we need to convert it. GDB core always sees
3820 the converted layout, so any read/write will have to be done
3822 siginfo_fixup (&siginfo, inf_siginfo, 0);
3824 if (offset + len > sizeof (siginfo))
3825 len = sizeof (siginfo) - offset;
3827 if (readbuf != NULL)
3828 memcpy (readbuf, inf_siginfo + offset, len);
3831 memcpy (inf_siginfo + offset, writebuf, len);
3833 /* Convert back to ptrace layout before flushing it out. */
3834 siginfo_fixup (&siginfo, inf_siginfo, 1)
3837 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3839 return TARGET_XFER_E_IO;
3843 return TARGET_XFER_OK;
/* target_ops to_xfer_partial for this layer: handle siginfo objects
   here, normalize inferior_ptid to a plain pid ptid, and delegate the
   rest to the saved single-threaded target (linux_ops).  */
3846 static enum target_xfer_status
3847 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3848 const char *annex, gdb_byte *readbuf,
3849 const gdb_byte *writebuf,
3850 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3852 struct cleanup *old_chain;
3853 enum target_xfer_status xfer;
3855 if (object == TARGET_OBJECT_SIGNAL_INFO)
3856 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3857 offset, len, xfered_len);
3859 /* The target is connected but no live inferior is selected. Pass
3860 this request down to a lower stratum (e.g., the executable
3862 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3863 return TARGET_XFER_EOF;
3865 old_chain = save_inferior_ptid ();
/* Lower layers expect (lwpid, 0, 0) style ptids.  */
3867 if (ptid_lwp_p (inferior_ptid))
3868 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
3870 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3871 offset, len, xfered_len);
3873 do_cleanups (old_chain);
/* Return whether the LWP named by PTID still exists, by probing it
   with signal 0 via kill_lwp.  */
3878 linux_thread_alive (ptid_t ptid)
3882 gdb_assert (ptid_lwp_p (ptid));
3884 /* Send signal 0 instead of anything ptrace, because ptracing a
3885 running thread errors out claiming that the thread doesn't
3887 err = kill_lwp (ptid_get_lwp (ptid), 0);
3889 if (debug_linux_nat)
3890 fprintf_unfiltered (gdb_stdlog,
3891 "LLTA: KILL(SIG0) %s (%s)\n",
3892 target_pid_to_str (ptid),
/* NOTE(review): tmp_errno is expected to be errno saved right after
   the kill_lwp call; its declaration is on an elided line — confirm.  */
3893 err ? safe_strerror (tmp_errno) : "OK");
/* target_ops to_thread_alive implementation; thin wrapper over
   linux_thread_alive.  */
3902 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3904 return linux_thread_alive (ptid);
/* target_ops to_pid_to_str implementation.  Returns "LWP <n>" for
   multi-threaded lwp ptids; otherwise falls back to
   normal_pid_to_str.  Note BUF is static, so the result is only valid
   until the next call.  */
3908 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3910 static char buf[64];
3912 if (ptid_lwp_p (ptid)
3913 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3914 || num_lwps (ptid_get_pid (ptid)) > 1))
3916 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
3920 return normal_pid_to_str (ptid);
/* target_ops to_thread_name implementation: read the thread's comm
   string from /proc/PID/task/LWP/comm, stripping the trailing
   newline.  */
3924 linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
3926 int pid = ptid_get_pid (thr->ptid);
3927 long lwp = ptid_get_lwp (thr->ptid);
3928 #define FORMAT "/proc/%d/task/%ld/comm"
3929 char buf[sizeof (FORMAT) + 30]
3931 char *result = NULL;
3933 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
3934 comm_file = gdb_fopen_cloexec (buf, "r");
3937 /* Not exported by the kernel, so we define it here. */
/* LINE is static: the returned name is overwritten by the next call.  */
3939 static char line[COMM_LEN + 1];
3941 if (fgets (line, sizeof (line), comm_file))
3943 char *nl = strchr (line, '\n');
3960 /* Accepts an integer PID; Returns a string representing a file that
3961 can be opened to get the symbols for the child process. */
/* Resolves the /proc/PID/exe symlink into a static buffer, so the
   result is only valid until the next call.  */
3964 linux_child_pid_to_exec_file (struct target_ops *self, int pid)
3966 static char buf[PATH_MAX];
3967 char name[PATH_MAX];
3969 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
3970 memset (buf, 0, PATH_MAX);
3971 if (readlink (name, buf, PATH_MAX - 1) <= 0)
3977 /* Implement the to_xfer_partial interface for memory reads using the /proc
3978 filesystem. Because we can use a single read() call for /proc, this
3979 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3980 but it doesn't support writes. */
3982 static enum target_xfer_status
3983 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3984 const char *annex, gdb_byte *readbuf,
3985 const gdb_byte *writebuf,
3986 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
/* Only memory reads are supported via /proc; everything else falls
   through to other transfer methods.  */
3992 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3995 /* Don't bother for one word. */
3996 if (len < 3 * sizeof (long))
3997 return TARGET_XFER_EOF;
3999 /* We could keep this file open and cache it - possibly one per
4000 thread. That requires some juggling, but is even faster. */
4001 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
4002 ptid_get_pid (inferior_ptid));
4003 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
4005 return TARGET_XFER_EOF;
4007 /* If pread64 is available, use it. It's faster if the kernel
4008 supports it (only one syscall), and it's 64-bit safe even on
4009 32-bit platforms (for instance, SPARC debugging a SPARC64
4012 if (pread64 (fd, readbuf, len, offset) != len)
4014 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4023 return TARGET_XFER_EOF;
4027 return TARGET_XFER_OK;
4032 /* Enumerate spufs IDs for process PID. */
/* Scans /proc/PID/fd for descriptors that are directories on a spufs
   mount (f_type == SPUFS_MAGIC) and stores each matching fd number as
   a 4-byte integer into BUF, honoring the OFFSET/LEN window.  */
4034 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
4036 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
4038 LONGEST written = 0;
4041 struct dirent *entry;
4043 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4044 dir = opendir (path);
4049 while ((entry = readdir (dir)) != NULL)
4055 fd = atoi (entry->d_name);
4059 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4060 if (stat (path, &st) != 0)
4062 if (!S_ISDIR (st.st_mode))
4065 if (statfs (path, &stfs) != 0)
4067 if (stfs.f_type != SPUFS_MAGIC)
4070 if (pos >= offset && pos + 4 <= offset + len)
4072 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4082 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4083 object type, using the /proc file system. */
/* A NULL/empty ANNEX means "list SPU context ids"; otherwise ANNEX
   names a file under /proc/PID/fd to read or write directly.  */
4085 static enum target_xfer_status
4086 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4087 const char *annex, gdb_byte *readbuf,
4088 const gdb_byte *writebuf,
4089 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
4094 int pid = ptid_get_pid (inferior_ptid);
4099 return TARGET_XFER_E_IO;
4102 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4105 return TARGET_XFER_E_IO;
4107 return TARGET_XFER_EOF;
4110 *xfered_len = (ULONGEST) l;
4111 return TARGET_XFER_OK;
4116 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4117 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
4119 return TARGET_XFER_E_IO;
4122 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4125 return TARGET_XFER_EOF;
4129 ret = write (fd, writebuf, (size_t) len);
4131 ret = read (fd, readbuf, (size_t) len);
4136 return TARGET_XFER_E_IO;
4138 return TARGET_XFER_EOF;
4141 *xfered_len = (ULONGEST) ret;
4142 return TARGET_XFER_OK;
4147 /* Parse LINE as a signal set and add its set bits to SIGS. */
/* LINE is a hex bitmask as printed in /proc/pid/status (e.g. the
   SigPnd value), terminated by a newline.  Each hex digit encodes four
   consecutive signal numbers.  */
4150 add_line_to_sigset (const char *line, sigset_t *sigs)
4152 int len = strlen (line) - 1;
4156 if (line[len] != '\n')
4157 error (_("Could not parse signal set: %s"), line);
4165 if (*p >= '0' && *p <= '9')
4167 else if (*p >= 'a' && *p <= 'f')
4168 digit = *p - 'a' + 10;
4170 error (_("Could not parse signal set: %s"), line);
/* Low-to-high bits of the digit map to four successive signals.  */
4175 sigaddset (sigs, signum + 1);
4177 sigaddset (sigs, signum + 2);
4179 sigaddset (sigs, signum + 3);
4181 sigaddset (sigs, signum + 4);
4187 /* Find process PID's pending signals from /proc/pid/status and set
/* Fills PENDING, BLOCKED and IGNORED from the SigPnd/ShdPnd, SigBlk
   and SigIgn lines respectively; errors out if the file can't be
   opened.  */
4191 linux_proc_pending_signals (int pid, sigset_t *pending,
4192 sigset_t *blocked, sigset_t *ignored)
4195 char buffer[PATH_MAX], fname[PATH_MAX];
4196 struct cleanup *cleanup;
4198 sigemptyset (pending);
4199 sigemptyset (blocked);
4200 sigemptyset (ignored);
4201 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4202 procfile = gdb_fopen_cloexec (fname, "r");
4203 if (procfile == NULL)
4204 error (_("Could not open %s"), fname);
4205 cleanup = make_cleanup_fclose (procfile);
4207 while (fgets (buffer, PATH_MAX, procfile) != NULL)
4209 /* Normal queued signals are on the SigPnd line in the status
4210 file. However, 2.6 kernels also have a "shared" pending
4211 queue for delivering signals to a thread group, so check for
4214 Unfortunately some Red Hat kernels include the shared pending
4215 queue but not the ShdPnd status field. */
4217 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4218 add_line_to_sigset (buffer + 8, pending);
4219 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4220 add_line_to_sigset (buffer + 8, pending);
4221 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4222 add_line_to_sigset (buffer + 8, blocked);
4223 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4224 add_line_to_sigset (buffer + 8, ignored);
4227 do_cleanups (cleanup);
/* Transfer TARGET_OBJECT_OSDATA objects by delegating to the common
   linux_common_xfer_osdata helper.  */
4230 static enum target_xfer_status
4231 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4232 const char *annex, gdb_byte *readbuf,
4233 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4234 ULONGEST *xfered_len)
4236 gdb_assert (object == TARGET_OBJECT_OSDATA);
4238 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4239 if (*xfered_len == 0)
4240 return TARGET_XFER_EOF;
4242 return TARGET_XFER_OK;
/* to_xfer_partial installed by linux_target_install_ops: dispatch
   AUXV/OSDATA/SPU objects to their handlers, try /proc for memory,
   and fall back to the saved lower-layer method otherwise.  */
4245 static enum target_xfer_status
4246 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4247 const char *annex, gdb_byte *readbuf,
4248 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4249 ULONGEST *xfered_len)
4251 enum target_xfer_status xfer;
4253 if (object == TARGET_OBJECT_AUXV)
4254 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4255 offset, len, xfered_len);
4257 if (object == TARGET_OBJECT_OSDATA)
4258 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4259 offset, len, xfered_len);
4261 if (object == TARGET_OBJECT_SPU)
4262 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4263 offset, len, xfered_len);
4265 /* GDB calculates all the addresses in possibly larger width of the address.
4266 Address width needs to be masked before its final use - either by
4267 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4269 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4271 if (object == TARGET_OBJECT_MEMORY)
4273 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
4275 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4276 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4279 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4280 offset, len, xfered_len);
4281 if (xfer != TARGET_XFER_EOF)
4284 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4285 offset, len, xfered_len);
/* Cleanup callback: resume the target stopped earlier.  ARG is the
   "ptid_t *" to resume.  */
4289 cleanup_target_stop (void *arg)
4291 ptid_t *ptid = (ptid_t *) arg;
4293 gdb_assert (arg != NULL);
4296 target_resume (*ptid, 0, GDB_SIGNAL_0);
/* Collect static tracepoint markers from the in-process agent by
   issuing qTfSTM/qTsSTM commands, keeping those matching STRID (or all
   when STRID is NULL).  The target is stopped for the query and
   resumed by the cleanup.  */
4299 static VEC(static_tracepoint_marker_p) *
4300 linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4303 char s[IPA_CMD_BUF_SIZE];
4304 struct cleanup *old_chain;
4305 int pid = ptid_get_pid (inferior_ptid);
4306 VEC(static_tracepoint_marker_p) *markers = NULL;
4307 struct static_tracepoint_marker *marker = NULL;
4309 ptid_t ptid = ptid_build (pid, 0, 0);
4314 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4315 s[sizeof ("qTfSTM")] = 0;
4317 agent_run_command (pid, s, strlen (s) + 1);
4319 old_chain = make_cleanup (free_current_marker, &marker);
4320 make_cleanup (cleanup_target_stop, &ptid);
4325 marker = XCNEW (struct static_tracepoint_marker);
4329 parse_static_tracepoint_marker_definition (p, &p, marker);
4331 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4333 VEC_safe_push (static_tracepoint_marker_p,
/* Non-matching marker: release its contents and reuse the object.  */
4339 release_static_tracepoint_marker (marker);
4340 memset (marker, 0, sizeof (*marker));
4343 while (*p++ == ','); /* comma-separated list */
4345 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4346 s[sizeof ("qTsSTM")] = 0;
4347 agent_run_command (pid, s, strlen (s) + 1);
4351 do_cleanups (old_chain);
4356 /* Create a prototype generic GNU/Linux target. The client can override
4357 it with local methods. */
/* Fill T's vector with the GNU/Linux child methods, saving the
   previous to_xfer_partial in super_xfer_partial so linux_xfer_partial
   can chain to it.  */
4360 linux_target_install_ops (struct target_ops *t)
4362 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4363 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4364 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4365 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4366 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4367 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4368 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4369 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4370 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4371 t->to_post_attach = linux_child_post_attach;
4372 t->to_follow_fork = linux_child_follow_fork;
4374 super_xfer_partial = t->to_xfer_partial;
4375 t->to_xfer_partial = linux_xfer_partial;
4377 t->to_static_tracepoint_markers_by_strid
4378 = linux_child_static_tracepoint_markers_by_strid;
/* NOTE(review): this appears to be the body of linux_target (): build
   an inf-ptrace target and install the Linux child methods on it —
   the signature line is not visible in this listing; confirm.  */
4384 struct target_ops *t;
4386 t = inf_ptrace_target ();
4387 linux_target_install_ops (t);
/* Like linux_target, but based on the traditional (register-offset)
   inf-ptrace target, parameterized by REGISTER_U_OFFSET.  */
4393 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4395 struct target_ops *t;
4397 t = inf_ptrace_trad_target (register_u_offset);
4398 linux_target_install_ops (t);
4403 /* target_is_async_p implementation. */
4406 linux_nat_is_async_p (struct target_ops *ops)
4408 /* NOTE: palves 2008-03-21: We're only async when the user requests
4409 it explicitly with the "set target-async" command.
4410 Someday, linux will always be async. */
4411 return target_async_permitted;
4414 /* target_can_async_p implementation. */
4417 linux_nat_can_async_p (struct target_ops *ops)
4419 /* NOTE: palves 2008-03-21: We're only async when the user requests
4420 it explicitly with the "set target-async" command.
4421 Someday, linux will always be async. */
4422 return target_async_permitted;
/* target_ops to_supports_non_stop implementation.  */
4426 linux_nat_supports_non_stop (struct target_ops *self)
4431 /* True if we want to support multi-process. To be removed when GDB
4432 supports multi-exec. */
4434 int linux_multi_process = 1;
/* target_ops to_supports_multi_process implementation.  */
4437 linux_nat_supports_multi_process (struct target_ops *self)
4439 return linux_multi_process;
/* target_ops to_supports_disable_randomization implementation:
   available only when built with personality(2) support.  */
4443 linux_nat_supports_disable_randomization (struct target_ops *self)
4445 #ifdef HAVE_PERSONALITY
/* Tracks whether the terminal currently belongs to GDB (1) or the
   inferior (0), for the async wrappers below.  */
4452 static int async_terminal_is_ours = 1;
4454 /* target_terminal_inferior implementation.
4456 This is a wrapper around child_terminal_inferior to add async support. */
4459 linux_nat_terminal_inferior (struct target_ops *self)
4461 if (!target_is_async_p ())
4463 /* Async mode is disabled. */
4464 child_terminal_inferior (self);
4468 child_terminal_inferior (self);
4470 /* Calls to target_terminal_*() are meant to be idempotent. */
4471 if (!async_terminal_is_ours)
/* Stop the event loop from reading stdin while the inferior owns the
   terminal.  */
4474 delete_file_handler (input_fd);
4475 async_terminal_is_ours = 0;
4479 /* target_terminal_ours implementation.
4481 This is a wrapper around child_terminal_ours to add async support (and
4482 implement the target_terminal_ours vs target_terminal_ours_for_output
4483 distinction). child_terminal_ours is currently no different than
4484 child_terminal_ours_for_output.
4485 We leave target_terminal_ours_for_output alone, leaving it to
4486 child_terminal_ours_for_output. */
4489 linux_nat_terminal_ours (struct target_ops *self)
4491 if (!target_is_async_p ())
4493 /* Async mode is disabled. */
4494 child_terminal_ours (self);
4498 /* GDB should never give the terminal to the inferior if the
4499 inferior is running in the background (run&, continue&, etc.),
4500 but claiming it sure should. */
4501 child_terminal_ours (self);
4503 if (async_terminal_is_ours)
/* Re-enable stdin handling in the event loop now that GDB owns the
   terminal again.  */
4506 clear_sigint_trap ();
4507 add_file_handler (input_fd, stdin_event_handler, 0);
4508 async_terminal_is_ours = 1;
/* Callback and context registered via linux_nat_async; invoked by
   handle_target_event when the event pipe fires.  */
4511 static void (*async_client_callback) (enum inferior_event_type event_type,
4513 static void *async_client_context;
4515 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4516 so we notice when any child changes state, and notify the
4517 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4518 above to wait for the arrival of a SIGCHLD. */
4521 sigchld_handler (int signo)
4523 int old_errno = errno;
4525 if (debug_linux_nat)
4526 ui_file_write_async_safe (gdb_stdlog,
4527 "sigchld\n", sizeof ("sigchld\n") - 1);
4529 if (signo == SIGCHLD
4530 && linux_nat_event_pipe[0] != -1)
4531 async_file_mark (); /* Let the event loop know that there are
4532 events to handle. */
4537 /* Callback registered with the target events file descriptor. */
/* Forwards to the client callback registered by linux_nat_async.  */
4540 handle_target_event (int error, gdb_client_data client_data)
4542 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4545 /* Create/destroy the target events pipe. Returns previous state. */
4548 linux_async_pipe (int enable)
4550 int previous = (linux_nat_event_pipe[0] != -1);
4552 if (previous != enable)
4556 /* Block child signals while we create/destroy the pipe, as
4557 their handler writes to it. */
4558 block_child_signals (&prev_mask);
4562 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4563 internal_error (__FILE__, __LINE__,
4564 "creating event pipe failed.");
/* Non-blocking so the SIGCHLD handler's write never stalls.  */
4566 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4567 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4571 close (linux_nat_event_pipe[0]);
4572 close (linux_nat_event_pipe[1]);
4573 linux_nat_event_pipe[0] = -1;
4574 linux_nat_event_pipe[1] = -1;
4577 restore_child_signals_mask (&prev_mask);
4583 /* target_async implementation. */
/* A non-NULL CALLBACK turns async mode on (creating the event pipe and
   registering it with the event loop); NULL turns it off.  */
4586 linux_nat_async (struct target_ops *ops,
4587 void (*callback) (enum inferior_event_type event_type,
4591 if (callback != NULL)
4593 async_client_callback = callback;
4594 async_client_context = context;
4595 if (!linux_async_pipe (1))
4597 add_file_handler (linux_nat_event_pipe[0],
4598 handle_target_event, NULL);
4599 /* There may be pending events to handle. Tell the event loop
4606 async_client_callback = callback;
4607 async_client_context = context;
4608 delete_file_handler (linux_nat_event_pipe[0]);
4609 linux_async_pipe (0);
4614 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
/* iterate_over_lwps callback used by linux_nat_stop.  DATA is
   unused.  */
4618 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4622 if (debug_linux_nat)
4623 fprintf_unfiltered (gdb_stdlog,
4624 "LNSL: running -> suspending %s\n",
4625 target_pid_to_str (lwp->ptid));
4628 if (lwp->last_resume_kind == resume_stop)
4630 if (debug_linux_nat)
4631 fprintf_unfiltered (gdb_stdlog,
4632 "linux-nat: already stopping LWP %ld at "
4634 ptid_get_lwp (lwp->ptid));
4638 stop_callback (lwp, NULL);
4639 lwp->last_resume_kind = resume_stop;
4643 /* Already known to be stopped; do nothing. */
4645 if (debug_linux_nat)
4647 if (find_thread_ptid (lwp->ptid)->stop_requested)
4648 fprintf_unfiltered (gdb_stdlog,
4649 "LNSL: already stopped/stop_requested %s\n",
4650 target_pid_to_str (lwp->ptid));
4652 fprintf_unfiltered (gdb_stdlog,
4653 "LNSL: already stopped/no "
4654 "stop_requested yet %s\n",
4655 target_pid_to_str (lwp->ptid));
/* target_ops to_stop implementation: stop all matching LWPs, or
   delegate to the lower layer.  */
4662 linux_nat_stop (struct target_ops *self, ptid_t ptid)
4665 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4667 linux_ops->to_stop (linux_ops, ptid);
/* target_ops to_close implementation: tear down async mode and chain
   to the lower layer's close method.  */
4671 linux_nat_close (struct target_ops *self)
4673 /* Unregister from the event loop. */
4674 if (linux_nat_is_async_p (self))
4675 linux_nat_async (self, NULL, NULL);
4677 if (linux_ops->to_close)
4678 linux_ops->to_close (linux_ops);
4683 /* When requests are passed down from the linux-nat layer to the
4684 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4685 used. The address space pointer is stored in the inferior object,
4686 but the common code that is passed such ptid can't tell whether
4687 lwpid is a "main" process id or not (it assumes so). We reverse
4688 look up the "main" process id from the lwp here. */
4690 static struct address_space *
4691 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4693 struct lwp_info *lwp;
4694 struct inferior *inf;
4697 if (ptid_get_lwp (ptid) == 0)
4699 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4701 lwp = find_lwp_pid (ptid);
4702 pid = ptid_get_pid (lwp->ptid);
4706 /* A (pid,lwpid,0) ptid. */
4707 pid = ptid_get_pid (ptid);
/* All threads of a process share one address space, stored on the
   inferior.  */
4710 inf = find_inferior_pid (pid);
4711 gdb_assert (inf != NULL);
4715 /* Return the cached value of the processor core for thread PTID. */
/* The cache is filled elsewhere (see the lp->core assignment in
   linux_nat_wait_1).  */
4718 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4720 struct lwp_info *info = find_lwp_pid (ptid);
/* Register T as the GNU/Linux native target: save the single-threaded
   vector in linux_ops and override T's methods with the multi-threaded
   implementations from this file.  */
4728 linux_nat_add_target (struct target_ops *t)
4730 /* Save the provided single-threaded target. We save this in a separate
4731 variable because another target we've inherited from (e.g. inf-ptrace)
4732 may have saved a pointer to T; we want to use it for the final
4733 process stratum target. */
4734 linux_ops_saved = *t;
4735 linux_ops = &linux_ops_saved;
4737 /* Override some methods for multithreading. */
4738 t->to_create_inferior = linux_nat_create_inferior;
4739 t->to_attach = linux_nat_attach;
4740 t->to_detach = linux_nat_detach;
4741 t->to_resume = linux_nat_resume;
4742 t->to_wait = linux_nat_wait;
4743 t->to_pass_signals = linux_nat_pass_signals;
4744 t->to_xfer_partial = linux_nat_xfer_partial;
4745 t->to_kill = linux_nat_kill;
4746 t->to_mourn_inferior = linux_nat_mourn_inferior;
4747 t->to_thread_alive = linux_nat_thread_alive;
4748 t->to_pid_to_str = linux_nat_pid_to_str;
4749 t->to_thread_name = linux_nat_thread_name;
4750 t->to_has_thread_control = tc_schedlock;
4751 t->to_thread_address_space = linux_nat_thread_address_space;
4752 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4753 t->to_stopped_data_address = linux_nat_stopped_data_address;
/* Async support hooks.  */
4755 t->to_can_async_p = linux_nat_can_async_p;
4756 t->to_is_async_p = linux_nat_is_async_p;
4757 t->to_supports_non_stop = linux_nat_supports_non_stop;
4758 t->to_async = linux_nat_async;
4759 t->to_terminal_inferior = linux_nat_terminal_inferior;
4760 t->to_terminal_ours = linux_nat_terminal_ours;
4762 super_close = t->to_close;
4763 t->to_close = linux_nat_close;
4765 /* Methods for non-stop support. */
4766 t->to_stop = linux_nat_stop;
4768 t->to_supports_multi_process = linux_nat_supports_multi_process;
4770 t->to_supports_disable_randomization
4771 = linux_nat_supports_disable_randomization;
4773 t->to_core_of_thread = linux_nat_core_of_thread;
4775 /* We don't change the stratum; this target will sit at
4776 process_stratum and thread_db will set at thread_stratum. This
4777 is a little strange, since this is a multi-threaded-capable
4778 target, but we want to be on the stack below thread_db, and we
4779 also want to be used for single-threaded processes. */
4784 /* Register a method to call whenever a new thread is attached. */
4786 linux_nat_set_new_thread (struct target_ops *t,
4787 void (*new_thread) (struct lwp_info *))
4789 /* Save the pointer. We only support a single registered instance
4790 of the GNU/Linux native target, so we do not need to map this to
4792 linux_nat_new_thread = new_thread;
4795 /* See declaration in linux-nat.h. */
/* Registers the arch-specific hook invoked when a fork is detected.  */
4798 linux_nat_set_new_fork (struct target_ops *t,
4799 linux_nat_new_fork_ftype *new_fork)
4801 /* Save the pointer. */
4802 linux_nat_new_fork = new_fork;
4805 /* See declaration in linux-nat.h. */
/* Registers the hook called by linux_nat_forget_process below.  */
4808 linux_nat_set_forget_process (struct target_ops *t,
4809 linux_nat_forget_process_ftype *fn)
4811 /* Save the pointer. */
4812 linux_nat_forget_process_hook = fn;
4815 /* See declaration in linux-nat.h. */
/* Notify the arch-specific code that process PID is gone, if a hook
   was registered.  */
4818 linux_nat_forget_process (pid_t pid)
4820 if (linux_nat_forget_process_hook != NULL)
4821 linux_nat_forget_process_hook (pid);
4824 /* Register a method that converts a siginfo object between the layout
4825 that ptrace returns, and the layout in the architecture of the
/* The registered callback is consulted by siginfo_fixup above.  */
4828 linux_nat_set_siginfo_fixup (struct target_ops *t,
4829 int (*siginfo_fixup) (siginfo_t *,
4833 /* Save the pointer. */
4834 linux_nat_siginfo_fixup = siginfo_fixup;
4837 /* Register a method to call prior to resuming a thread. */
4840 linux_nat_set_prepare_to_resume (struct target_ops *t,
4841 void (*prepare_to_resume) (struct lwp_info *))
4843 /* Save the pointer. */
4844 linux_nat_prepare_to_resume = prepare_to_resume;
4847 /* See linux-nat.h. */
/* Fetch PTID's siginfo via PTRACE_GETSIGINFO, zeroing *SIGINFO on
   failure.  Prefers the LWP id when PTID carries one.  */
4850 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4854 pid = ptid_get_lwp (ptid);
4856 pid = ptid_get_pid (ptid);
4859 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4862 memset (siginfo, 0, sizeof (*siginfo));
4868 /* Provide a prototype to silence -Wmissing-prototypes. */
4869 extern initialize_file_ftype _initialize_linux_nat;
/* Module initializer: register the "set debug lin-lwp" command, set up
   the SIGCHLD handler and signal masks used for sigsuspend, and
   configure the extra ptrace options this target relies on.  */
4872 _initialize_linux_nat (void)
4874 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4875 &debug_linux_nat, _("\
4876 Set debugging of GNU/Linux lwp module."), _("\
4877 Show debugging of GNU/Linux lwp module."), _("\
4878 Enables printf debugging output."),
4880 show_debug_linux_nat,
4881 &setdebuglist, &showdebuglist);
4883 /* Save this mask as the default. */
4884 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4886 /* Install a SIGCHLD handler. */
4887 sigchld_action.sa_handler = sigchld_handler;
4888 sigemptyset (&sigchld_action.sa_mask);
4889 sigchld_action.sa_flags = SA_RESTART;
4891 /* Make it the default. */
4892 sigaction (SIGCHLD, &sigchld_action, NULL);
4894 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4895 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4896 sigdelset (&suspend_mask, SIGCHLD);
4898 sigemptyset (&blocked_mask);
4900 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
4901 support read-only process state. */
4902 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
4903 | PTRACE_O_TRACEVFORKDONE
4904 | PTRACE_O_TRACEVFORK
4905 | PTRACE_O_TRACEFORK
4906 | PTRACE_O_TRACEEXEC);
4910 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4911 the GNU/Linux Threads library and therefore doesn't really belong
4914 /* Read variable NAME in the target and return its value if found.
4915 Otherwise return zero. It is assumed that the type of the variable
4919 get_signo (const char *name)
4921 struct bound_minimal_symbol ms;
4924 ms = lookup_minimal_symbol (name, NULL, NULL);
4925 if (ms.minsym == NULL)
/* Symbol not present in the inferior: caller falls back to defaults.  */
4928 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4929 sizeof (signo)) != 0)
4935 /* Return the set of signals used by the threads library in *SET. */
4938 lin_thread_get_thread_signals (sigset_t *set)
4940 struct sigaction action;
4941 int restart, cancel;
4943 sigemptyset (&blocked_mask);
4946 restart = get_signo ("__pthread_sig_restart");
4947 cancel = get_signo ("__pthread_sig_cancel");
4949 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4950 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4951 not provide any way for the debugger to query the signal numbers -
4952 fortunately they don't change! */
4955 restart = __SIGRTMIN;
4958 cancel = __SIGRTMIN + 1;
4960 sigaddset (set, restart);
4961 sigaddset (set, cancel);
4963 /* The GNU/Linux Threads library makes terminating threads send a
4964 special "cancel" signal instead of SIGCHLD. Make sure we catch
4965 those (to prevent them from terminating GDB itself, which is
4966 likely to be their default action) and treat them the same way as
4969 action.sa_handler = sigchld_handler;
4970 sigemptyset (&action.sa_mask);
4971 action.sa_flags = SA_RESTART;
4972 sigaction (cancel, &action, NULL);
4974 /* We block the "cancel" signal throughout this code ... */
4975 sigaddset (&blocked_mask, cancel);
4976 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4978 /* ... except during a sigsuspend. */
4979 sigdelset (&suspend_mask, cancel);