1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
29 #include <sys/syscall.h>
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-ptrace.h"
34 #include "linux-procfs.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
40 #include "inf-ptrace.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
52 #include "event-loop.h"
53 #include "event-top.h"
55 #include <sys/types.h>
56 #include "gdb_dirent.h"
57 #include "xml-support.h"
61 #include "linux-osdata.h"
62 #include "cli/cli-utils.h"
65 #define SPUFS_MAGIC 0x23c9b64e
68 #ifdef HAVE_PERSONALITY
69 # include <sys/personality.h>
70 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
71 # define ADDR_NO_RANDOMIZE 0x0040000
73 #endif /* HAVE_PERSONALITY */
75 /* This comment documents high-level logic of this file.
77 Waiting for events in sync mode
78 ===============================
80 When waiting for an event in a specific thread, we just use waitpid, passing
81 the specific pid, and not passing WNOHANG.
83 When waiting for an event in all threads, waitpid is not quite good. Prior to
84 version 2.4, Linux can either wait for event in main thread, or in secondary
85 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
86 miss an event. The solution is to use non-blocking waitpid, together with
87 sigsuspend. First, we use non-blocking waitpid to get an event in the main
88 process, if any. Second, we use non-blocking waitpid with the __WCLONE
89 flag to check for events in cloned processes. If nothing is found, we use
90 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
91 happened to a child process -- and SIGCHLD will be delivered both for events
92 in main debugged process and in cloned processes. As soon as we know there's
93 an event, we get back to calling nonblocking waitpid with and without
96 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
97 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
98 blocked, the signal becomes pending and sigsuspend immediately
99 notices it and returns.
101 Waiting for events in async mode
102 ================================
104 In async mode, GDB should always be ready to handle both user input
105 and target events, so neither blocking waitpid nor sigsuspend are
106 viable options. Instead, we should asynchronously notify the GDB main
107 event loop whenever there's an unprocessed event from the target. We
108 detect asynchronous target events by handling SIGCHLD signals. To
109 notify the event loop about target events, the self-pipe trick is used
110 --- a pipe is registered as waitable event source in the event loop,
111 the event loop select/poll's on the read end of this pipe (as well on
112 other event sources, e.g., stdin), and the SIGCHLD handler writes a
113 byte to this pipe. This is more portable than relying on
114 pselect/ppoll, since on kernels that lack those syscalls, libc
115 emulates them with select/poll+sigprocmask, and that is racy
116 (a.k.a. plain broken).
118 Obviously, if we fail to notify the event loop if there's a target
119 event, it's bad. OTOH, if we notify the event loop when there's no
120 event from the target, linux_nat_wait will detect that there's no real
121 event to report, and return event of type TARGET_WAITKIND_IGNORE.
122 This is mostly harmless, but it will waste time and is better avoided.
124 The main design point is that every time GDB is outside linux-nat.c,
125 we have a SIGCHLD handler installed that is called when something
126 happens to the target and notifies the GDB event loop. Whenever GDB
127 core decides to handle the event, and calls into linux-nat.c, we
128 process things as in sync mode, except that the we never block in
131 While processing an event, we may end up momentarily blocked in
132 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
133 return quickly. E.g., in all-stop mode, before reporting to the core
134 that an LWP hit a breakpoint, all LWPs are stopped by sending them
135 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
136 Note that this is different from blocking indefinitely waiting for the
137 next event --- here, we're already handling an event.
142 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
143 signal is not entirely significant; we just need a signal to be delivered,
144 so that we can intercept it. SIGSTOP's advantage is that it can not be
145 blocked. A disadvantage is that it is not a real-time signal, so it can only
146 be queued once; we do not keep track of other sources of SIGSTOP.
148 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
149 use them, because they have special behavior when the signal is generated -
150 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
151 kills the entire thread group.
153 A delivered SIGSTOP would stop the entire thread group, not just the thread we
154 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
155 cancel it (by PTRACE_CONT without passing SIGSTOP).
157 We could use a real-time signal instead. This would solve those problems; we
158 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
159 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
160 generates it, and there are races with trying to find a signal that is not
164 #define O_LARGEFILE 0
167 /* Unlike other extended result codes, WSTOPSIG (status) on
168 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
169 instead SIGTRAP with bit 7 set. */
170 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
172 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174 static struct target_ops *linux_ops;
175 static struct target_ops linux_ops_saved;
177 /* The method to call, if any, when a new thread is attached. */
178 static void (*linux_nat_new_thread) (struct lwp_info *);
180 /* Hook to call prior to resuming a thread. */
181 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
183 /* The method to call, if any, when the siginfo object needs to be
184 converted between the layout returned by ptrace, and the layout in
185 the architecture of the inferior. */
186 static int (*linux_nat_siginfo_fixup) (struct siginfo *,
190 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
191 Called by our to_xfer_partial. */
192 static LONGEST (*super_xfer_partial) (struct target_ops *,
194 const char *, gdb_byte *,
198 static int debug_linux_nat;
200 show_debug_linux_nat (struct ui_file *file, int from_tty,
201 struct cmd_list_element *c, const char *value)
203 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
207 struct simple_pid_list
211 struct simple_pid_list *next;
213 struct simple_pid_list *stopped_pids;
215 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
216 can not be used, 1 if it can. */
218 static int linux_supports_tracefork_flag = -1;
220 /* This variable is a tri-state flag: -1 for unknown, 0 if
221 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
223 static int linux_supports_tracesysgood_flag = -1;
225 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
226 PTRACE_O_TRACEVFORKDONE. */
228 static int linux_supports_tracevforkdone_flag = -1;
230 /* Stores the current used ptrace() options. */
231 static int current_ptrace_options = 0;
233 /* Async mode support. */
235 /* The read/write ends of the pipe registered as waitable file in the
237 static int linux_nat_event_pipe[2] = { -1, -1 };
239 /* Flush the event pipe. */
242 async_file_flush (void)
249 ret = read (linux_nat_event_pipe[0], &buf, 1);
251 while (ret >= 0 || (ret == -1 && errno == EINTR));
254 /* Put something (anything, doesn't matter what, or how much) in event
255 pipe, so that the select/poll in the event-loop realizes we have
256 something to process. */
259 async_file_mark (void)
263 /* It doesn't really matter what the pipe contains, as long we end
264 up with something in it. Might as well flush the previous
270 ret = write (linux_nat_event_pipe[1], "+", 1);
272 while (ret == -1 && errno == EINTR);
274 /* Ignore EAGAIN. If the pipe is full, the event loop will already
275 be awakened anyway. */
278 static void linux_nat_async (void (*callback)
279 (enum inferior_event_type event_type,
282 static int kill_lwp (int lwpid, int signo);
284 static int stop_callback (struct lwp_info *lp, void *data);
286 static void block_child_signals (sigset_t *prev_mask);
287 static void restore_child_signals_mask (sigset_t *prev_mask);
290 static struct lwp_info *add_lwp (ptid_t ptid);
291 static void purge_lwp_list (int pid);
292 static struct lwp_info *find_lwp_pid (ptid_t ptid);
295 /* Trivial list manipulation functions to keep track of a list of
296 new stopped processes. */
298 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
300 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
303 new_pid->status = status;
304 new_pid->next = *listp;
309 in_pid_list_p (struct simple_pid_list *list, int pid)
311 struct simple_pid_list *p;
313 for (p = list; p != NULL; p = p->next)
320 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
322 struct simple_pid_list **p;
324 for (p = listp; *p != NULL; p = &(*p)->next)
325 if ((*p)->pid == pid)
327 struct simple_pid_list *next = (*p)->next;
329 *statusp = (*p)->status;
338 /* A helper function for linux_test_for_tracefork, called after fork (). */
341 linux_tracefork_child (void)
343 ptrace (PTRACE_TRACEME, 0, 0, 0);
344 kill (getpid (), SIGSTOP);
349 /* Wrapper function for waitpid which handles EINTR. */
352 my_waitpid (int pid, int *statusp, int flags)
358 ret = waitpid (pid, statusp, flags);
360 while (ret == -1 && errno == EINTR);
365 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
367 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
368 we know that the feature is not available. This may change the tracing
369 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
371 However, if it succeeds, we don't know for sure that the feature is
372 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
373 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
374 fork tracing, and let it fork. If the process exits, we assume that we
375 can't use TRACEFORK; if we get the fork notification, and we can extract
376 the new child's PID, then we assume that we can. */
379 linux_test_for_tracefork (int original_pid)
381 int child_pid, ret, status;
385 /* We don't want those ptrace calls to be interrupted. */
386 block_child_signals (&prev_mask);
388 linux_supports_tracefork_flag = 0;
389 linux_supports_tracevforkdone_flag = 0;
391 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
394 restore_child_signals_mask (&prev_mask);
400 perror_with_name (("fork"));
403 linux_tracefork_child ();
405 ret = my_waitpid (child_pid, &status, 0);
407 perror_with_name (("waitpid"));
408 else if (ret != child_pid)
409 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
410 if (! WIFSTOPPED (status))
411 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
414 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
417 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
420 warning (_("linux_test_for_tracefork: failed to kill child"));
421 restore_child_signals_mask (&prev_mask);
425 ret = my_waitpid (child_pid, &status, 0);
426 if (ret != child_pid)
427 warning (_("linux_test_for_tracefork: failed "
428 "to wait for killed child"));
429 else if (!WIFSIGNALED (status))
430 warning (_("linux_test_for_tracefork: unexpected "
431 "wait status 0x%x from killed child"), status);
433 restore_child_signals_mask (&prev_mask);
437 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
438 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
439 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
440 linux_supports_tracevforkdone_flag = (ret == 0);
442 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
444 warning (_("linux_test_for_tracefork: failed to resume child"));
446 ret = my_waitpid (child_pid, &status, 0);
448 if (ret == child_pid && WIFSTOPPED (status)
449 && status >> 16 == PTRACE_EVENT_FORK)
452 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
453 if (ret == 0 && second_pid != 0)
457 linux_supports_tracefork_flag = 1;
458 my_waitpid (second_pid, &second_status, 0);
459 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
461 warning (_("linux_test_for_tracefork: "
462 "failed to kill second child"));
463 my_waitpid (second_pid, &status, 0);
467 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
468 "(%d, status 0x%x)"), ret, status);
470 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
472 warning (_("linux_test_for_tracefork: failed to kill child"));
473 my_waitpid (child_pid, &status, 0);
475 restore_child_signals_mask (&prev_mask);
478 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
480 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
481 we know that the feature is not available. This may change the tracing
482 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
485 linux_test_for_tracesysgood (int original_pid)
490 /* We don't want those ptrace calls to be interrupted. */
491 block_child_signals (&prev_mask);
493 linux_supports_tracesysgood_flag = 0;
495 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
499 linux_supports_tracesysgood_flag = 1;
501 restore_child_signals_mask (&prev_mask);
504 /* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
505 This function also sets linux_supports_tracesysgood_flag. */
508 linux_supports_tracesysgood (int pid)
510 if (linux_supports_tracesysgood_flag == -1)
511 linux_test_for_tracesysgood (pid);
512 return linux_supports_tracesysgood_flag;
515 /* Return non-zero iff we have tracefork functionality available.
516 This function also sets linux_supports_tracefork_flag. */
519 linux_supports_tracefork (int pid)
521 if (linux_supports_tracefork_flag == -1)
522 linux_test_for_tracefork (pid);
523 return linux_supports_tracefork_flag;
527 linux_supports_tracevforkdone (int pid)
529 if (linux_supports_tracefork_flag == -1)
530 linux_test_for_tracefork (pid);
531 return linux_supports_tracevforkdone_flag;
535 linux_enable_tracesysgood (ptid_t ptid)
537 int pid = ptid_get_lwp (ptid);
540 pid = ptid_get_pid (ptid);
542 if (linux_supports_tracesysgood (pid) == 0)
545 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
547 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
552 linux_enable_event_reporting (ptid_t ptid)
554 int pid = ptid_get_lwp (ptid);
557 pid = ptid_get_pid (ptid);
559 if (! linux_supports_tracefork (pid))
562 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
563 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
565 if (linux_supports_tracevforkdone (pid))
566 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
568 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
569 read-only process state. */
571 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
575 linux_child_post_attach (int pid)
577 linux_enable_event_reporting (pid_to_ptid (pid));
578 linux_enable_tracesysgood (pid_to_ptid (pid));
582 linux_child_post_startup_inferior (ptid_t ptid)
584 linux_enable_event_reporting (ptid);
585 linux_enable_tracesysgood (ptid);
589 linux_child_follow_fork (struct target_ops *ops, int follow_child)
593 int parent_pid, child_pid;
595 block_child_signals (&prev_mask);
597 has_vforked = (inferior_thread ()->pending_follow.kind
598 == TARGET_WAITKIND_VFORKED);
599 parent_pid = ptid_get_lwp (inferior_ptid);
601 parent_pid = ptid_get_pid (inferior_ptid);
602 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
605 linux_enable_event_reporting (pid_to_ptid (child_pid));
608 && !non_stop /* Non-stop always resumes both branches. */
609 && (!target_is_async_p () || sync_execution)
610 && !(follow_child || detach_fork || sched_multi))
612 /* The parent stays blocked inside the vfork syscall until the
613 child execs or exits. If we don't let the child run, then
614 the parent stays blocked. If we're telling the parent to run
615 in the foreground, the user will not be able to ctrl-c to get
616 back the terminal, effectively hanging the debug session. */
617 fprintf_filtered (gdb_stderr, _("\
618 Can not resume the parent process over vfork in the foreground while\n\
619 holding the child stopped. Try \"set detach-on-fork\" or \
620 \"set schedule-multiple\".\n"));
621 /* FIXME output string > 80 columns. */
627 struct lwp_info *child_lp = NULL;
629 /* We're already attached to the parent, by default. */
631 /* Detach new forked process? */
634 /* Before detaching from the child, remove all breakpoints
635 from it. If we forked, then this has already been taken
636 care of by infrun.c. If we vforked however, any
637 breakpoint inserted in the parent is visible in the
638 child, even those added while stopped in a vfork
639 catchpoint. This will remove the breakpoints from the
640 parent also, but they'll be reinserted below. */
643 /* keep breakpoints list in sync. */
644 remove_breakpoints_pid (GET_PID (inferior_ptid));
647 if (info_verbose || debug_linux_nat)
649 target_terminal_ours ();
650 fprintf_filtered (gdb_stdlog,
651 "Detaching after fork from "
652 "child process %d.\n",
656 ptrace (PTRACE_DETACH, child_pid, 0, 0);
660 struct inferior *parent_inf, *child_inf;
661 struct cleanup *old_chain;
663 /* Add process to GDB's tables. */
664 child_inf = add_inferior (child_pid);
666 parent_inf = current_inferior ();
667 child_inf->attach_flag = parent_inf->attach_flag;
668 copy_terminal_info (child_inf, parent_inf);
670 old_chain = save_inferior_ptid ();
671 save_current_program_space ();
673 inferior_ptid = ptid_build (child_pid, child_pid, 0);
674 add_thread (inferior_ptid);
675 child_lp = add_lwp (inferior_ptid);
676 child_lp->stopped = 1;
677 child_lp->last_resume_kind = resume_stop;
679 /* If this is a vfork child, then the address-space is
680 shared with the parent. */
683 child_inf->pspace = parent_inf->pspace;
684 child_inf->aspace = parent_inf->aspace;
686 /* The parent will be frozen until the child is done
687 with the shared region. Keep track of the
689 child_inf->vfork_parent = parent_inf;
690 child_inf->pending_detach = 0;
691 parent_inf->vfork_child = child_inf;
692 parent_inf->pending_detach = 0;
696 child_inf->aspace = new_address_space ();
697 child_inf->pspace = add_program_space (child_inf->aspace);
698 child_inf->removable = 1;
699 set_current_program_space (child_inf->pspace);
700 clone_program_space (child_inf->pspace, parent_inf->pspace);
702 /* Let the shared library layer (solib-svr4) learn about
703 this new process, relocate the cloned exec, pull in
704 shared libraries, and install the solib event
705 breakpoint. If a "cloned-VM" event was propagated
706 better throughout the core, this wouldn't be
708 solib_create_inferior_hook (0);
711 /* Let the thread_db layer learn about this new process. */
712 check_for_thread_db ();
714 do_cleanups (old_chain);
719 struct lwp_info *parent_lp;
720 struct inferior *parent_inf;
722 parent_inf = current_inferior ();
724 /* If we detached from the child, then we have to be careful
725 to not insert breakpoints in the parent until the child
726 is done with the shared memory region. However, if we're
727 staying attached to the child, then we can and should
728 insert breakpoints, so that we can debug it. A
729 subsequent child exec or exit is enough to know when does
730 the child stops using the parent's address space. */
731 parent_inf->waiting_for_vfork_done = detach_fork;
732 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
734 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
735 gdb_assert (linux_supports_tracefork_flag >= 0);
737 if (linux_supports_tracevforkdone (0))
740 fprintf_unfiltered (gdb_stdlog,
741 "LCFF: waiting for VFORK_DONE on %d\n",
743 parent_lp->stopped = 1;
745 /* We'll handle the VFORK_DONE event like any other
746 event, in target_wait. */
750 /* We can't insert breakpoints until the child has
751 finished with the shared memory region. We need to
752 wait until that happens. Ideal would be to just
754 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
755 - waitpid (parent_pid, &status, __WALL);
756 However, most architectures can't handle a syscall
757 being traced on the way out if it wasn't traced on
760 We might also think to loop, continuing the child
761 until it exits or gets a SIGTRAP. One problem is
762 that the child might call ptrace with PTRACE_TRACEME.
764 There's no simple and reliable way to figure out when
765 the vforked child will be done with its copy of the
766 shared memory. We could step it out of the syscall,
767 two instructions, let it go, and then single-step the
768 parent once. When we have hardware single-step, this
769 would work; with software single-step it could still
770 be made to work but we'd have to be able to insert
771 single-step breakpoints in the child, and we'd have
772 to insert -just- the single-step breakpoint in the
773 parent. Very awkward.
775 In the end, the best we can do is to make sure it
776 runs for a little while. Hopefully it will be out of
777 range of any breakpoints we reinsert. Usually this
778 is only the single-step breakpoint at vfork's return
782 fprintf_unfiltered (gdb_stdlog,
783 "LCFF: no VFORK_DONE "
784 "support, sleeping a bit\n");
788 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
789 and leave it pending. The next linux_nat_resume call
790 will notice a pending event, and bypasses actually
791 resuming the inferior. */
792 parent_lp->status = 0;
793 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
794 parent_lp->stopped = 1;
796 /* If we're in async mode, need to tell the event loop
797 there's something here to process. */
798 if (target_can_async_p ())
805 struct inferior *parent_inf, *child_inf;
806 struct lwp_info *child_lp;
807 struct program_space *parent_pspace;
809 if (info_verbose || debug_linux_nat)
811 target_terminal_ours ();
813 fprintf_filtered (gdb_stdlog,
814 _("Attaching after process %d "
815 "vfork to child process %d.\n"),
816 parent_pid, child_pid);
818 fprintf_filtered (gdb_stdlog,
819 _("Attaching after process %d "
820 "fork to child process %d.\n"),
821 parent_pid, child_pid);
824 /* Add the new inferior first, so that the target_detach below
825 doesn't unpush the target. */
827 child_inf = add_inferior (child_pid);
829 parent_inf = current_inferior ();
830 child_inf->attach_flag = parent_inf->attach_flag;
831 copy_terminal_info (child_inf, parent_inf);
833 parent_pspace = parent_inf->pspace;
835 /* If we're vforking, we want to hold on to the parent until the
836 child exits or execs. At child exec or exit time we can
837 remove the old breakpoints from the parent and detach or
838 resume debugging it. Otherwise, detach the parent now; we'll
839 want to reuse it's program/address spaces, but we can't set
840 them to the child before removing breakpoints from the
841 parent, otherwise, the breakpoints module could decide to
842 remove breakpoints from the wrong process (since they'd be
843 assigned to the same address space). */
847 gdb_assert (child_inf->vfork_parent == NULL);
848 gdb_assert (parent_inf->vfork_child == NULL);
849 child_inf->vfork_parent = parent_inf;
850 child_inf->pending_detach = 0;
851 parent_inf->vfork_child = child_inf;
852 parent_inf->pending_detach = detach_fork;
853 parent_inf->waiting_for_vfork_done = 0;
855 else if (detach_fork)
856 target_detach (NULL, 0);
858 /* Note that the detach above makes PARENT_INF dangling. */
860 /* Add the child thread to the appropriate lists, and switch to
861 this new thread, before cloning the program space, and
862 informing the solib layer about this new process. */
864 inferior_ptid = ptid_build (child_pid, child_pid, 0);
865 add_thread (inferior_ptid);
866 child_lp = add_lwp (inferior_ptid);
867 child_lp->stopped = 1;
868 child_lp->last_resume_kind = resume_stop;
870 /* If this is a vfork child, then the address-space is shared
871 with the parent. If we detached from the parent, then we can
872 reuse the parent's program/address spaces. */
873 if (has_vforked || detach_fork)
875 child_inf->pspace = parent_pspace;
876 child_inf->aspace = child_inf->pspace->aspace;
880 child_inf->aspace = new_address_space ();
881 child_inf->pspace = add_program_space (child_inf->aspace);
882 child_inf->removable = 1;
883 set_current_program_space (child_inf->pspace);
884 clone_program_space (child_inf->pspace, parent_pspace);
886 /* Let the shared library layer (solib-svr4) learn about
887 this new process, relocate the cloned exec, pull in
888 shared libraries, and install the solib event breakpoint.
889 If a "cloned-VM" event was propagated better throughout
890 the core, this wouldn't be required. */
891 solib_create_inferior_hook (0);
894 /* Let the thread_db layer learn about this new process. */
895 check_for_thread_db ();
898 restore_child_signals_mask (&prev_mask);
904 linux_child_insert_fork_catchpoint (int pid)
906 return !linux_supports_tracefork (pid);
910 linux_child_remove_fork_catchpoint (int pid)
916 linux_child_insert_vfork_catchpoint (int pid)
918 return !linux_supports_tracefork (pid);
922 linux_child_remove_vfork_catchpoint (int pid)
928 linux_child_insert_exec_catchpoint (int pid)
930 return !linux_supports_tracefork (pid);
934 linux_child_remove_exec_catchpoint (int pid)
940 linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
941 int table_size, int *table)
943 if (!linux_supports_tracesysgood (pid))
946 /* On GNU/Linux, we ignore the arguments. It means that we only
947 enable the syscall catchpoints, but do not disable them.
949 Also, we do not use the `table' information because we do not
950 filter system calls here. We let GDB do the logic for us. */
954 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
955 are processes sharing the same VM space. A multi-threaded process
956 is basically a group of such processes. However, such a grouping
957 is almost entirely a user-space issue; the kernel doesn't enforce
958 such a grouping at all (this might change in the future). In
959 general, we'll rely on the threads library (i.e. the GNU/Linux
960 Threads library) to provide such a grouping.
962 It is perfectly well possible to write a multi-threaded application
963 without the assistance of a threads library, by using the clone
964 system call directly. This module should be able to give some
965 rudimentary support for debugging such applications if developers
966 specify the CLONE_PTRACE flag in the clone system call, and are
967 using the Linux kernel 2.4 or above.
969 Note that there are some peculiarities in GNU/Linux that affect
972 - In general one should specify the __WCLONE flag to waitpid in
973 order to make it report events for any of the cloned processes
974 (and leave it out for the initial process). However, if a cloned
975 process has exited the exit status is only reported if the
976 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
977 we cannot use it since GDB must work on older systems too.
979 - When a traced, cloned process exits and is waited for by the
980 debugger, the kernel reassigns it to the original parent and
981 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
982 library doesn't notice this, which leads to the "zombie problem":
983 When debugged, a multi-threaded process that spawns a lot of
984 threads will run out of processes, even if the threads exit,
985 because the "zombies" stay around. */
987 /* List of known LWPs. */
988 struct lwp_info *lwp_list;
991 /* Original signal mask. */
992 static sigset_t normal_mask;
994 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
995 _initialize_linux_nat. */
996 static sigset_t suspend_mask;
998 /* Signals to block to make that sigsuspend work. */
999 static sigset_t blocked_mask;
1001 /* SIGCHLD action. */
1002 struct sigaction sigchld_action;
1004 /* Block child signals (SIGCHLD and linux threads signals), and store
1005 the previous mask in PREV_MASK. */
1008 block_child_signals (sigset_t *prev_mask)
1010 /* Make sure SIGCHLD is blocked. */
1011 if (!sigismember (&blocked_mask, SIGCHLD))
1012 sigaddset (&blocked_mask, SIGCHLD);
1014 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1017 /* Restore child signals mask, previously returned by
1018 block_child_signals. */
1021 restore_child_signals_mask (sigset_t *prev_mask)
1023 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1026 /* Mask of signals to pass directly to the inferior. */
1027 static sigset_t pass_mask;
1029 /* Update signals to pass to the inferior. */
1031 linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1035 sigemptyset (&pass_mask);
1037 for (signo = 1; signo < NSIG; signo++)
1039 int target_signo = target_signal_from_host (signo);
1040 if (target_signo < numsigs && pass_signals[target_signo])
1041 sigaddset (&pass_mask, signo);
1047 /* Prototypes for local functions. */
1048 static int stop_wait_callback (struct lwp_info *lp, void *data);
1049 static int linux_thread_alive (ptid_t ptid);
1050 static char *linux_child_pid_to_exec_file (int pid);
1053 /* Convert wait status STATUS to a string. Used for printing debug
1057 status_to_str (int status)
1059 static char buf[64];
1061 if (WIFSTOPPED (status))
1063 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1064 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1065 strsignal (SIGTRAP));
1067 snprintf (buf, sizeof (buf), "%s (stopped)",
1068 strsignal (WSTOPSIG (status)));
1070 else if (WIFSIGNALED (status))
1071 snprintf (buf, sizeof (buf), "%s (terminated)",
1072 strsignal (WTERMSIG (status)));
1074 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1079 /* Destroy and free LP. */
1082 lwp_free (struct lwp_info *lp)
1084 xfree (lp->arch_private);
1088 /* Remove all LWPs belong to PID from the lwp list. */
1091 purge_lwp_list (int pid)
1093 struct lwp_info *lp, *lpprev, *lpnext;
1097 for (lp = lwp_list; lp; lp = lpnext)
1101 if (ptid_get_pid (lp->ptid) == pid)
1104 lwp_list = lp->next;
1106 lpprev->next = lp->next;
1115 /* Return the number of known LWPs in the tgid given by PID. */
1121 struct lwp_info *lp;
1123 for (lp = lwp_list; lp; lp = lp->next)
1124 if (ptid_get_pid (lp->ptid) == pid)
1130 /* Add the LWP specified by PID to the list. Return a pointer to the
1131 structure describing the new LWP. The LWP should already be stopped
1132 (with an exception for the very first LWP). */
1134 static struct lwp_info *
1135 add_lwp (ptid_t ptid)
1137 struct lwp_info *lp;
/* Only full (pid, lwp) ptids may be added here.  */
1139 gdb_assert (is_lwp (ptid));
1141 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
/* Zero the whole structure so every flag/field starts cleared.  */
1143 memset (lp, 0, sizeof (struct lwp_info));
1145 lp->last_resume_kind = resume_continue;
1146 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
/* Push the new entry onto the head of the global lwp_list.  */
1151 lp->next = lwp_list;
/* Give the low-target hook a chance to set up per-LWP state.  */
1154 if (linux_nat_new_thread != NULL)
1155 linux_nat_new_thread (lp);
1160 /* Remove the LWP specified by PID from the list.  */
1163 delete_lwp (ptid_t ptid)
1165 struct lwp_info *lp, *lpprev;
/* Linear scan for the entry with exactly this ptid; LPPREV trails LP
   so the match can be unlinked.  */
1169 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1170 if (ptid_equal (lp->ptid, ptid))
/* Mid-list: splice LP out after its predecessor.  */
1177 lpprev->next = lp->next;
/* Head of list: advance the list head instead.  */
1179 lwp_list = lp->next;
1184 /* Return a pointer to the structure describing the LWP corresponding
1185 to PID. If no corresponding LWP could be found, return NULL. */
1187 static struct lwp_info *
1188 find_lwp_pid (ptid_t ptid)
1190 struct lwp_info *lp;
/* Use the LWP id when PTID carries one, otherwise fall back to the
   process id.  NOTE(review): the is_lwp test selecting between these
   two assignments is on a line not visible in this view.  */
1194 lwp = GET_LWP (ptid);
1196 lwp = GET_PID (ptid);
/* Scan the list for a matching LWP id.  */
1198 for (lp = lwp_list; lp; lp = lp->next)
1199 if (lwp == GET_LWP (lp->ptid))
1205 /* Call CALLBACK with its second argument set to DATA for every LWP in
1206 the list. If CALLBACK returns 1 for a particular LWP, return a
1207 pointer to the structure describing that LWP immediately.
1208 Otherwise return NULL. */
1211 iterate_over_lwps (ptid_t filter,
1212 int (*callback) (struct lwp_info *, void *),
1215 struct lwp_info *lp, *lpnext;
/* LPNEXT is saved before the callback runs, so the callback may
   delete LP without breaking the traversal.  */
1217 for (lp = lwp_list; lp; lp = lpnext)
/* Only visit LWPs whose ptid matches FILTER (wildcards allowed).  */
1221 if (ptid_match (lp->ptid, filter))
1223 if ((*callback) (lp, data))
1231 /* Update our internal state when changing from one checkpoint to
1232 another indicated by NEW_PTID. We can only switch single-threaded
1233 applications, so we only create one new LWP, and the previous list
1237 linux_nat_switch_fork (ptid_t new_ptid)
1239 struct lwp_info *lp;
/* Drop every LWP of the old process; the fork we switch to gets a
   fresh single entry below.  */
1241 purge_lwp_list (GET_PID (inferior_ptid));
1243 lp = add_lwp (new_ptid);
1246 /* This changes the thread's ptid while preserving the gdb thread
1247 num. Also changes the inferior pid, while preserving the
1249 thread_change_ptid (inferior_ptid, new_ptid);
1251 /* We've just told GDB core that the thread changed target id, but,
1252 in fact, it really is a different thread, with different register
1254 registers_changed ();
1257 /* Handle the exit of a single thread LP.  */
1260 exit_lwp (struct lwp_info *lp)
1262 struct thread_info *th = find_thread_ptid (lp->ptid);
/* If the core still knows this thread, announce the exit (when
   enabled) and remove the thread object.  */
1266 if (print_thread_events)
1267 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1269 delete_thread (lp->ptid);
/* Finally drop our own bookkeeping entry.  */
1272 delete_lwp (lp->ptid);
1275 /* Detect `T (stopped)' in `/proc/PID/status'.
1276 Other states including `T (tracing stop)' are reported as false. */
1279 pid_is_stopped (pid_t pid)
/* Read /proc/PID/status line by line looking for the "State:" entry;
   BUF is reused both for the path and for each line.  */
1285 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1286 status_file = fopen (buf, "r");
1287 if (status_file != NULL)
1291 while (fgets (buf, sizeof (buf), status_file))
1293 if (strncmp (buf, "State:", 6) == 0)
/* Only a job-control stop counts; "T (tracing stop)" must not
   match the "T (stopped)" substring test.  */
1299 if (have_state && strstr (buf, "T (stopped)") != NULL)
1301 fclose (status_file);
1306 /* Wait for the LWP specified by LP, which we have just attached to.
1307 Returns a wait status for that LWP, to cache.
   NOTE(review): CLONED and SIGNALLED are out-parameters set on lines
   not visible in this view -- presumably recording whether __WCLONE
   was needed and whether we sent the SIGSTOP ourselves; confirm.  */
1310 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1313 pid_t new_pid, pid = GET_LWP (ptid);
1316 if (pid_is_stopped (pid))
1318 if (debug_linux_nat)
1319 fprintf_unfiltered (gdb_stdlog,
1320 "LNPAW: Attaching to a stopped process\n");
1322 /* The process is definitely stopped. It is in a job control
1323 stop, unless the kernel predates the TASK_STOPPED /
1324 TASK_TRACED distinction, in which case it might be in a
1325 ptrace stop. Make sure it is in a ptrace stop; from there we
1326 can kill it, signal it, et cetera.
1328 First make sure there is a pending SIGSTOP. Since we are
1329 already attached, the process can not transition from stopped
1330 to running without a PTRACE_CONT; so we know this signal will
1331 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1332 probably already in the queue (unless this kernel is old
1333 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1334 is not an RT signal, it can only be queued once. */
1335 kill_lwp (pid, SIGSTOP);
1337 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1338 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1339 ptrace (PTRACE_CONT, pid, 0, 0);
1342 /* Make sure the initial process is stopped. The user-level threads
1343 layer might want to poke around in the inferior, and that won't
1344 work if things haven't stabilized yet. */
1345 new_pid = my_waitpid (pid, &status, 0);
1346 if (new_pid == -1 && errno == ECHILD)
1349 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1351 /* Try again with __WCLONE to check cloned processes. */
1352 new_pid = my_waitpid (pid, &status, __WCLONE);
/* waitpid on a specific pid must report that same pid.  */
1356 gdb_assert (pid == new_pid);
1358 if (!WIFSTOPPED (status))
1360 /* The pid we tried to attach has apparently just exited. */
1361 if (debug_linux_nat)
1362 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1363 pid, status_to_str (status));
/* Stopped, but by something other than the expected SIGSTOP.  */
1367 if (WSTOPSIG (status) != SIGSTOP)
1370 if (debug_linux_nat)
1371 fprintf_unfiltered (gdb_stdlog,
1372 "LNPAW: Received %s after attaching\n",
1373 status_to_str (status));
1379 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1380 the new LWP could not be attached, or 1 if we're already auto
1381 attached to this thread, but haven't processed the
1382 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1383 its existence, without considering it an error. */
1386 lin_lwp_attach_lwp (ptid_t ptid)
1388 struct lwp_info *lp;
1392 gdb_assert (is_lwp (ptid));
/* SIGCHLD must stay blocked while we manipulate the LWP list; every
   exit path below restores the mask.  */
1394 block_child_signals (&prev_mask);
1396 lp = find_lwp_pid (ptid);
1397 lwpid = GET_LWP (ptid);
1399 /* We assume that we're already attached to any LWP that has an id
1400 equal to the overall process id, and to any LWP that is already
1401 in our list of LWPs. If we're not seeing exit events from threads
1402 and we've had PID wraparound since we last tried to stop all threads,
1403 this assumption might be wrong; fortunately, this is very unlikely
1405 if (lwpid != GET_PID (ptid) && lp == NULL)
1407 int status, cloned = 0, signalled = 0;
1409 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1411 if (linux_supports_tracefork_flag)
1413 /* If we haven't stopped all threads when we get here,
1414 we may have seen a thread listed in thread_db's list,
1415 but not processed the PTRACE_EVENT_CLONE yet. If
1416 that's the case, ignore this new thread, and let
1417 normal event handling discover it later. */
1418 if (in_pid_list_p (stopped_pids, lwpid))
1420 /* We've already seen this thread stop, but we
1421 haven't seen the PTRACE_EVENT_CLONE extended
1423 restore_child_signals_mask (&prev_mask);
1431 /* See if we've got a stop for this new child
1432 pending. If so, we're already attached. */
1433 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1434 if (new_pid == -1 && errno == ECHILD)
1435 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
/* A stop was already pending: remember it for the later
   PTRACE_EVENT_CLONE handling.  */
1438 if (WIFSTOPPED (status))
1439 add_to_pid_list (&stopped_pids, lwpid, status);
1441 restore_child_signals_mask (&prev_mask);
1447 /* If we fail to attach to the thread, issue a warning,
1448 but continue. One way this can happen is if thread
1449 creation is interrupted; as of Linux kernel 2.6.19, a
1450 bug may place threads in the thread list and then fail
1452 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1453 safe_strerror (errno));
1454 restore_child_signals_mask (&prev_mask);
1458 if (debug_linux_nat)
1459 fprintf_unfiltered (gdb_stdlog,
1460 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1461 target_pid_to_str (ptid));
/* Collect and cache the initial stop of the freshly attached LWP.  */
1463 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1464 if (!WIFSTOPPED (status))
1466 restore_child_signals_mask (&prev_mask);
1470 lp = add_lwp (ptid);
1472 lp->cloned = cloned;
1473 lp->signalled = signalled;
/* Stopped by some other signal: keep the status pending so it is
   reported to the core later instead of being lost.  */
1474 if (WSTOPSIG (status) != SIGSTOP)
1477 lp->status = status;
1480 target_post_attach (GET_LWP (lp->ptid));
1482 if (debug_linux_nat)
1484 fprintf_unfiltered (gdb_stdlog,
1485 "LLAL: waitpid %s received %s\n",
1486 target_pid_to_str (ptid),
1487 status_to_str (status));
1492 /* We assume that the LWP representing the original process is
1493 already stopped. Mark it as stopped in the data structure
1494 that the GNU/linux ptrace layer uses to keep track of
1495 threads. Note that this won't have already been done since
1496 the main thread will have, we assume, been stopped by an
1497 attach from a different layer. */
1499 lp = add_lwp (ptid);
1503 lp->last_resume_kind = resume_stop;
1504 restore_child_signals_mask (&prev_mask);
/* Start a new inferior, optionally disabling address space
   randomization (via personality) around the underlying
   to_create_inferior call so the child runs un-randomized.  */
1509 linux_nat_create_inferior (struct target_ops *ops,
1510 char *exec_file, char *allargs, char **env,
1513 #ifdef HAVE_PERSONALITY
1514 int personality_orig = 0, personality_set = 0;
1515 #endif /* HAVE_PERSONALITY */
1517 /* The fork_child mechanism is synchronous and calls target_wait, so
1518 we have to mask the async mode. */
1520 #ifdef HAVE_PERSONALITY
1521 if (disable_randomization)
/* Query the current personality, then OR in ADDR_NO_RANDOMIZE if it
   is not already set; remember both so it can be restored below.  */
1524 personality_orig = personality (0xffffffff);
1525 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1527 personality_set = 1;
1528 personality (personality_orig | ADDR_NO_RANDOMIZE);
/* Re-read to verify the kernel actually honored the request.  */
1530 if (errno != 0 || (personality_set
1531 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1532 warning (_("Error disabling address space randomization: %s"),
1533 safe_strerror (errno));
1535 #endif /* HAVE_PERSONALITY */
1537 /* Make sure we report all signals during startup. */
1538 linux_nat_pass_signals (0, NULL);
1540 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1542 #ifdef HAVE_PERSONALITY
/* Undo the personality change so GDB's own process (and future
   children) are unaffected.  */
1543 if (personality_set)
1546 personality (personality_orig);
1548 warning (_("Error restoring address space randomization: %s"),
1549 safe_strerror (errno));
1551 #endif /* HAVE_PERSONALITY */
/* Attach to a running process: delegate to the lower layer, decorate
   the main thread's ptid with LWP info, wait for the initial stop,
   and cache its wait status for the first linux_nat_wait.  */
1555 linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
1557 struct lwp_info *lp;
1561 /* Make sure we report all signals during attach. */
1562 linux_nat_pass_signals (0, NULL);
1564 linux_ops->to_attach (ops, args, from_tty);
1566 /* The ptrace base target adds the main thread with (pid,0,0)
1567 format. Decorate it with lwp info. */
1568 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1569 thread_change_ptid (inferior_ptid, ptid);
1571 /* Add the initial process as the first LWP to the list. */
1572 lp = add_lwp (ptid);
1574 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
/* If the process did not stop, it either exited or was killed
   between our attach and the wait; mourn and report precisely.  */
1576 if (!WIFSTOPPED (status))
1578 if (WIFEXITED (status))
1580 int exit_code = WEXITSTATUS (status);
1582 target_terminal_ours ();
1583 target_mourn_inferior ();
1585 error (_("Unable to attach: program exited normally."));
1587 error (_("Unable to attach: program exited with code %d."),
1590 else if (WIFSIGNALED (status))
1592 enum target_signal signo;
1594 target_terminal_ours ();
1595 target_mourn_inferior ();
1597 signo = target_signal_from_host (WTERMSIG (status));
1598 error (_("Unable to attach: program terminated with signal "
1600 target_signal_to_name (signo),
1601 target_signal_to_string (signo));
/* Any other status here is a logic error, not a user condition.  */
1604 internal_error (__FILE__, __LINE__,
1605 _("unexpected status %d for PID %ld"),
1606 status, (long) GET_LWP (ptid));
1611 /* Save the wait status to report later. */
1613 if (debug_linux_nat)
1614 fprintf_unfiltered (gdb_stdlog,
1615 "LNA: waitpid %ld, saving status %s\n",
1616 (long) GET_PID (lp->ptid), status_to_str (status));
1618 lp->status = status;
1620 if (target_can_async_p ())
1621 target_async (inferior_event_handler, 0);
1624 /* Get pending status of LP.  Determines which signal (if any) should
   be re-delivered to LP on detach/resume, storing a synthetic
   W_STOPCODE in *STATUS when one applies.  */
1626 get_pending_status (struct lwp_info *lp, int *status)
1628 enum target_signal signo = TARGET_SIGNAL_0;
1630 /* If we paused threads momentarily, we may have stored pending
1631 events in lp->status or lp->waitstatus (see stop_wait_callback),
1632 and GDB core hasn't seen any signal for those threads.
1633 Otherwise, the last signal reported to the core is found in the
1634 thread object's stop_signal.
1636 There's a corner case that isn't handled here at present. Only
1637 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1638 stop_signal make sense as a real signal to pass to the inferior.
1639 Some catchpoint related events, like
1640 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1641 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1642 those traps are debug API (ptrace in our case) related and
1643 induced; the inferior wouldn't see them if it wasn't being
1644 traced. Hence, we should never pass them to the inferior, even
1645 when set to pass state. Since this corner case isn't handled by
1646 infrun.c when proceeding with a signal, for consistency, neither
1647 do we handle it here (or elsewhere in the file we check for
1648 signal pass state). Normally SIGTRAP isn't set to pass state, so
1649 this is really a corner case. */
1651 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1652 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1653 else if (lp->status)
1654 signo = target_signal_from_host (WSTOPSIG (lp->status));
/* Non-stop: use this thread's own recorded stop signal, unless it is
   currently executing (in which case it has none pending).  */
1655 else if (non_stop && !is_executing (lp->ptid))
1657 struct thread_info *tp = find_thread_ptid (lp->ptid);
1659 signo = tp->suspend.stop_signal;
/* All-stop: only the thread that reported the last event has a
   meaningful stop signal to forward.  */
1663 struct target_waitstatus last;
1666 get_last_target_status (&last_ptid, &last);
1668 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1670 struct thread_info *tp = find_thread_ptid (lp->ptid);
1672 signo = tp->suspend.stop_signal;
1678 if (signo == TARGET_SIGNAL_0)
1680 if (debug_linux_nat)
1681 fprintf_unfiltered (gdb_stdlog,
1682 "GPT: lwp %s has no pending signal\n",
1683 target_pid_to_str (lp->ptid));
/* A signal the user set to "nopass" is deliberately dropped.  */
1685 else if (!signal_pass_state (signo))
1687 if (debug_linux_nat)
1688 fprintf_unfiltered (gdb_stdlog,
1689 "GPT: lwp %s had signal %s, "
1690 "but it is in no pass state\n",
1691 target_pid_to_str (lp->ptid),
1692 target_signal_to_string (signo));
/* Synthesize a wait status carrying the host signal number.  */
1696 *status = W_STOPCODE (target_signal_to_host (signo));
1698 if (debug_linux_nat)
1699 fprintf_unfiltered (gdb_stdlog,
1700 "GPT: lwp %s has pending signal %s\n",
1701 target_pid_to_str (lp->ptid),
1702 target_signal_to_string (signo));
/* Per-LWP callback for linux_nat_detach: clear any pending SIGSTOP,
   then PTRACE_DETACH each non-main LWP, forwarding any real pending
   signal, and drop it from our list.  */
1709 detach_callback (struct lwp_info *lp, void *data)
1711 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1713 if (debug_linux_nat && lp->status)
1714 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1715 strsignal (WSTOPSIG (lp->status)),
1716 target_pid_to_str (lp->ptid));
1718 /* If there is a pending SIGSTOP, get rid of it. */
1721 if (debug_linux_nat)
1722 fprintf_unfiltered (gdb_stdlog,
1723 "DC: Sending SIGCONT to %s\n",
1724 target_pid_to_str (lp->ptid));
/* SIGCONT cancels the queued SIGSTOP so the LWP is not left
   stopped after we detach.  */
1726 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1730 /* We don't actually detach from the LWP that has an id equal to the
1731 overall process id just yet. */
1732 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1736 /* Pass on any pending signal for this LWP. */
1737 get_pending_status (lp, &status);
/* Let the low target flush per-LWP state (e.g. debug registers)
   before the LWP runs free.  */
1739 if (linux_nat_prepare_to_resume != NULL)
1740 linux_nat_prepare_to_resume (lp);
1742 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1743 WSTOPSIG (status)) < 0)
1744 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1745 safe_strerror (errno));
1747 if (debug_linux_nat)
1748 fprintf_unfiltered (gdb_stdlog,
1749 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1750 target_pid_to_str (lp->ptid),
1751 strsignal (WSTOPSIG (status)));
1753 delete_lwp (lp->ptid);
/* Detach from the inferior: stop all LWPs, detach each non-main LWP
   via detach_callback, then detach the main LWP through the lower
   layer (or hand off to linux-fork for checkpointed inferiors).  */
1760 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1764 struct lwp_info *main_lwp;
1766 pid = GET_PID (inferior_ptid);
/* Switch back to synchronous mode for the detach sequence.  */
1768 if (target_can_async_p ())
1769 linux_nat_async (NULL, 0);
1771 /* Stop all threads before detaching. ptrace requires that the
1772 thread is stopped to successfully detach. */
1773 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1774 /* ... and wait until all of them have reported back that
1775 they're no longer running. */
1776 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1778 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1780 /* Only the initial process should be left right now. */
1781 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1783 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1785 /* Pass on any pending signal for the last LWP. */
1786 if ((args == NULL || *args == '\0')
1787 && get_pending_status (main_lwp, &status) != -1
1788 && WIFSTOPPED (status))
1790 /* Put the signal number in ARGS so that inf_ptrace_detach will
1791 pass it along with PTRACE_DETACH.
   NOTE(review): the allocation of the ARGS buffer written to here is
   on a line not visible in this view -- confirm ARGS is non-NULL and
   large enough before this sprintf.  */
1793 sprintf (args, "%d", (int) WSTOPSIG (status));
1794 if (debug_linux_nat)
1795 fprintf_unfiltered (gdb_stdlog,
1796 "LND: Sending signal %s to %s\n",
1798 target_pid_to_str (main_lwp->ptid));
1801 if (linux_nat_prepare_to_resume != NULL)
1802 linux_nat_prepare_to_resume (main_lwp);
1803 delete_lwp (main_lwp->ptid);
1805 if (forks_exist_p ())
1807 /* Multi-fork case. The current inferior_ptid is being detached
1808 from, but there are other viable forks to debug. Detach from
1809 the current fork, and context-switch to the first
1811 linux_fork_detach (args, from_tty);
1813 if (non_stop && target_can_async_p ())
1814 target_async (inferior_event_handler, 0);
/* Single-fork case: let the lower layer do the final detach.  */
1817 linux_ops->to_detach (ops, args, from_tty);
/* Resume LP (a sibling of the event thread), single-stepping if STEP,
   unless it has a pending event, is not stopped, or is the parent of
   an in-progress vfork.  */
1823 resume_lwp (struct lwp_info *lp, int step)
1827 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
/* A vfork parent must stay stopped until the child releases it.  */
1829 if (inf->vfork_child != NULL)
1831 if (debug_linux_nat)
1832 fprintf_unfiltered (gdb_stdlog,
1833 "RC: Not resuming %s (vfork parent)\n",
1834 target_pid_to_str (lp->ptid));
/* Only resume if there is no pending status or pending event.  */
1836 else if (lp->status == 0
1837 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1839 if (debug_linux_nat)
1840 fprintf_unfiltered (gdb_stdlog,
1841 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1842 target_pid_to_str (lp->ptid));
1844 if (linux_nat_prepare_to_resume != NULL)
1845 linux_nat_prepare_to_resume (lp);
1846 linux_ops->to_resume (linux_ops,
1847 pid_to_ptid (GET_LWP (lp->ptid)),
1848 step, TARGET_SIGNAL_0);
/* Clear stale per-stop state now that the LWP is running again.  */
1851 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1852 lp->stopped_by_watchpoint = 0;
1856 if (debug_linux_nat)
1857 fprintf_unfiltered (gdb_stdlog,
1858 "RC: Not resuming sibling %s (has pending)\n",
1859 target_pid_to_str (lp->ptid));
1864 if (debug_linux_nat)
1865 fprintf_unfiltered (gdb_stdlog,
1866 "RC: Not resuming sibling %s (not stopped)\n",
1867 target_pid_to_str (lp->ptid));
/* iterate_over_lwps callback wrapping resume_lwp.  NOTE(review): the
   body is on lines not visible in this view.  */
1872 resume_callback (struct lwp_info *lp, void *data)
/* iterate_over_lwps callback: mark LP as wanting to stop.  */
1879 resume_clear_callback (struct lwp_info *lp, void *data)
1882 lp->last_resume_kind = resume_stop;
/* iterate_over_lwps callback: mark LP as resumed (continue).  */
1887 resume_set_callback (struct lwp_info *lp, void *data)
1890 lp->last_resume_kind = resume_continue;
/* target_ops resume method: resume PTID (or all LWPs when PTID is a
   wildcard/pid), single-stepping if STEP, delivering SIGNO.  Short
   circuits when the event thread already has a pending status.  */
1895 linux_nat_resume (struct target_ops *ops,
1896 ptid_t ptid, int step, enum target_signal signo)
1899 struct lwp_info *lp;
1902 if (debug_linux_nat)
1903 fprintf_unfiltered (gdb_stdlog,
1904 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1905 step ? "step" : "resume",
1906 target_pid_to_str (ptid),
1907 (signo != TARGET_SIGNAL_0
1908 ? strsignal (target_signal_to_host (signo)) : "0"),
1909 target_pid_to_str (inferior_ptid));
1911 block_child_signals (&prev_mask);
1913 /* A specific PTID means `step only this process id'. */
1914 resume_many = (ptid_equal (minus_one_ptid, ptid)
1915 || ptid_is_pid (ptid));
1917 /* Mark the lwps we're resuming as resumed. */
1918 iterate_over_lwps (ptid, resume_set_callback, NULL);
1920 /* See if it's the current inferior that should be handled
1923 lp = find_lwp_pid (inferior_ptid);
1925 lp = find_lwp_pid (ptid);
1926 gdb_assert (lp != NULL);
1928 /* Remember if we're stepping. */
1930 lp->last_resume_kind = step ? resume_step : resume_continue;
1932 /* If we have a pending wait status for this thread, there is no
1933 point in resuming the process. But first make sure that
1934 linux_nat_wait won't preemptively handle the event - we
1935 should never take this short-circuit if we are going to
1936 leave LP running, since we have skipped resuming all the
1937 other threads. This bit of code needs to be synchronized
1938 with linux_nat_wait. */
1940 if (lp->status && WIFSTOPPED (lp->status))
/* A pending signal in pass state is not an event for the core;
   re-deliver it instead of short-circuiting.  */
1943 && WSTOPSIG (lp->status)
1944 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1946 if (debug_linux_nat)
1947 fprintf_unfiltered (gdb_stdlog,
1948 "LLR: Not short circuiting for ignored "
1949 "status 0x%x\n", lp->status);
1951 /* FIXME: What should we do if we are supposed to continue
1952 this thread with a signal? */
1953 gdb_assert (signo == TARGET_SIGNAL_0);
1954 signo = target_signal_from_host (WSTOPSIG (lp->status));
/* Pending event for the core: skip the actual resume and let
   linux_nat_wait report it.  */
1959 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1961 /* FIXME: What should we do if we are supposed to continue
1962 this thread with a signal? */
1963 gdb_assert (signo == TARGET_SIGNAL_0);
1965 if (debug_linux_nat)
1966 fprintf_unfiltered (gdb_stdlog,
1967 "LLR: Short circuiting for status 0x%x\n",
1970 restore_child_signals_mask (&prev_mask);
1971 if (target_can_async_p ())
1973 target_async (inferior_event_handler, 0);
1974 /* Tell the event loop we have something to process. */
1980 /* Mark LWP as not stopped to prevent it from being continued by
1985 iterate_over_lwps (ptid, resume_callback, NULL);
1987 /* Convert to something the lower layer understands. */
1988 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1990 if (linux_nat_prepare_to_resume != NULL)
1991 linux_nat_prepare_to_resume (lp);
1992 linux_ops->to_resume (linux_ops, ptid, step, signo);
/* Clear stale per-stop state now that the LWP is running.  */
1993 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1994 lp->stopped_by_watchpoint = 0;
1996 if (debug_linux_nat)
1997 fprintf_unfiltered (gdb_stdlog,
1998 "LLR: %s %s, %s (resume event thread)\n",
1999 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2000 target_pid_to_str (ptid),
2001 (signo != TARGET_SIGNAL_0
2002 ? strsignal (target_signal_to_host (signo)) : "0"));
2004 restore_child_signals_mask (&prev_mask);
2005 if (target_can_async_p ())
2006 target_async (inferior_event_handler, 0);
2009 /* Send a signal to an LWP.  */
2012 kill_lwp (int lwpid, int signo)
2014 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2015 fails, then we are not using nptl threads and we should be using kill. */
2017 #ifdef HAVE_TKILL_SYSCALL
/* Remembered across calls so we only probe for tkill once.  */
2019 static int tkill_failed;
2026 ret = syscall (__NR_tkill, lwpid, signo);
/* Any errno other than ENOSYS means tkill exists; use its result.  */
2027 if (errno != ENOSYS)
/* Fallback: plain kill targets the whole (non-NPTL) thread.  */
2034 return kill (lwpid, signo);
2037 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2038 event, check if the core is interested in it: if not, ignore the
2039 event, and keep waiting; otherwise, we need to toggle the LWP's
2040 syscall entry/exit status, since the ptrace event itself doesn't
2041 indicate it, and report the trap to higher layers. */
2044 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2046 struct target_waitstatus *ourstatus = &lp->waitstatus;
2047 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2048 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2052 /* If we're stopping threads, there's a SIGSTOP pending, which
2053 makes it so that the LWP reports an immediate syscall return,
2054 followed by the SIGSTOP. Skip seeing that "return" using
2055 PTRACE_CONT directly, and let stop_wait_callback collect the
2056 SIGSTOP. Later when the thread is resumed, a new syscall
2057 entry event. If we didn't do this (and returned 0), we'd
2058 leave a syscall entry pending, and our caller, by using
2059 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2060 itself. Later, when the user re-resumes this LWP, we'd see
2061 another syscall entry event and we'd mistake it for a return.
2063 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2064 (leaving immediately with LWP->signalled set, without issuing
2065 a PTRACE_CONT), it would still be problematic to leave this
2066 syscall enter pending, as later when the thread is resumed,
2067 it would then see the same syscall exit mentioned above,
2068 followed by the delayed SIGSTOP, while the syscall didn't
2069 actually get to execute. It seems it would be even more
2070 confusing to the user. */
2072 if (debug_linux_nat)
2073 fprintf_unfiltered (gdb_stdlog,
2074 "LHST: ignoring syscall %d "
2075 "for LWP %ld (stopping threads), "
2076 "resuming with PTRACE_CONT for SIGSTOP\n",
2078 GET_LWP (lp->ptid));
2080 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2081 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2085 if (catch_syscall_enabled ())
2087 /* Always update the entry/return state, even if this particular
2088 syscall isn't interesting to the core now. In async mode,
2089 the user could install a new catchpoint for this syscall
2090 between syscall enter/return, and we'll need to know to
2091 report a syscall return if that happens. */
2092 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2093 ? TARGET_WAITKIND_SYSCALL_RETURN
2094 : TARGET_WAITKIND_SYSCALL_ENTRY);
2096 if (catching_syscall_number (syscall_number))
2098 /* Alright, an event to report. */
2099 ourstatus->kind = lp->syscall_state;
2100 ourstatus->value.syscall_number = syscall_number;
2102 if (debug_linux_nat)
2103 fprintf_unfiltered (gdb_stdlog,
2104 "LHST: stopping for %s of syscall %d"
2107 == TARGET_WAITKIND_SYSCALL_ENTRY
2108 ? "entry" : "return",
2110 GET_LWP (lp->ptid));
/* State was toggled above but this syscall has no catchpoint:
   log and fall through to the silent-resume path below.  */
2114 if (debug_linux_nat)
2115 fprintf_unfiltered (gdb_stdlog,
2116 "LHST: ignoring %s of syscall %d "
2118 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2119 ? "entry" : "return",
2121 GET_LWP (lp->ptid));
2125 /* If we had been syscall tracing, and hence used PT_SYSCALL
2126 before on this LWP, it could happen that the user removes all
2127 syscall catchpoints before we get to process this event.
2128 There are two noteworthy issues here:
2130 - When stopped at a syscall entry event, resuming with
2131 PT_STEP still resumes executing the syscall and reports a
2134 - Only PT_SYSCALL catches syscall enters. If we last
2135 single-stepped this thread, then this event can't be a
2136 syscall enter. If we last single-stepped this thread, this
2137 has to be a syscall exit.
2139 The points above mean that the next resume, be it PT_STEP or
2140 PT_CONTINUE, can not trigger a syscall trace event. */
2141 if (debug_linux_nat)
2142 fprintf_unfiltered (gdb_stdlog,
2143 "LHST: caught syscall event "
2144 "with no syscall catchpoints."
2145 " %d for LWP %ld, ignoring\n",
2147 GET_LWP (lp->ptid));
2148 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2151 /* The core isn't interested in this event. For efficiency, avoid
2152 stopping all threads only to have the core resume them all again.
2153 Since we're not stopping threads, if we're still syscall tracing
2154 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2155 subsequent syscall. Simply resume using the inf-ptrace layer,
2156 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2158 /* Note that gdbarch_get_syscall_number may access registers, hence
2160 registers_changed ();
2161 if (linux_nat_prepare_to_resume != NULL)
2162 linux_nat_prepare_to_resume (lp);
2163 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2164 lp->step, TARGET_SIGNAL_0);
2168 /* Handle a GNU/Linux extended wait response. If we see a clone
2169 event, we need to add the new LWP to our list (and not report the
2170 trap to higher layers). This function returns non-zero if the
2171 event should be ignored and we should wait again. If STOPPING is
2172 true, the new LWP remains stopped, otherwise it is continued. */
2175 linux_handle_extended_wait (struct lwp_info *lp, int status,
2178 int pid = GET_LWP (lp->ptid);
2179 struct target_waitstatus *ourstatus = &lp->waitstatus;
2180 int event = status >> 16;
2182 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2183 || event == PTRACE_EVENT_CLONE)
2185 unsigned long new_pid;
2188 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2190 /* If we haven't already seen the new PID stop, wait for it now. */
2191 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2193 /* The new child has a pending SIGSTOP. We can't affect it until it
2194 hits the SIGSTOP, but we're already attached. */
2195 ret = my_waitpid (new_pid, &status,
2196 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2198 perror_with_name (_("waiting for new child"));
2199 else if (ret != new_pid)
2200 internal_error (__FILE__, __LINE__,
2201 _("wait returned unexpected PID %d"), ret);
2202 else if (!WIFSTOPPED (status))
2203 internal_error (__FILE__, __LINE__,
2204 _("wait returned unexpected status 0x%x"), status);
2207 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2209 if (event == PTRACE_EVENT_FORK
2210 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2212 /* Handle checkpointing by linux-fork.c here as a special
2213 case. We don't want the follow-fork-mode or 'catch fork'
2214 to interfere with this. */
2216 /* This won't actually modify the breakpoint list, but will
2217 physically remove the breakpoints from the child. */
2218 detach_breakpoints (new_pid);
2220 /* Retain child fork in ptrace (stopped) state. */
2221 if (!find_fork_pid (new_pid))
2224 /* Report as spurious, so that infrun doesn't want to follow
2225 this fork. We're actually doing an infcall in
2227 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2228 linux_enable_event_reporting (pid_to_ptid (new_pid));
2230 /* Report the stop to the core. */
2234 if (event == PTRACE_EVENT_FORK)
2235 ourstatus->kind = TARGET_WAITKIND_FORKED;
2236 else if (event == PTRACE_EVENT_VFORK)
2237 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2240 struct lwp_info *new_lp;
2242 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2244 if (debug_linux_nat)
2245 fprintf_unfiltered (gdb_stdlog,
2246 "LHEW: Got clone event "
2247 "from LWP %d, new child is LWP %ld\n",
2250 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2252 new_lp->stopped = 1;
2254 if (WSTOPSIG (status) != SIGSTOP)
2256 /* This can happen if someone starts sending signals to
2257 the new thread before it gets a chance to run, which
2258 have a lower number than SIGSTOP (e.g. SIGUSR1).
2259 This is an unlikely case, and harder to handle for
2260 fork / vfork than for clone, so we do not try - but
2261 we handle it for clone events here. We'll send
2262 the other signal on to the thread below. */
2264 new_lp->signalled = 1;
2268 struct thread_info *tp;
2270 /* When we stop for an event in some other thread, and
2271 pull the thread list just as this thread has cloned,
2272 we'll have seen the new thread in the thread_db list
2273 before handling the CLONE event (glibc's
2274 pthread_create adds the new thread to the thread list
2275 before clone'ing, and has the kernel fill in the
2276 thread's tid on the clone call with
2277 CLONE_PARENT_SETTID). If that happened, and the core
2278 had requested the new thread to stop, we'll have
2279 killed it with SIGSTOP. But since SIGSTOP is not an
2280 RT signal, it can only be queued once. We need to be
2281 careful to not resume the LWP if we wanted it to
2282 stop. In that case, we'll leave the SIGSTOP pending.
2283 It will later be reported as TARGET_SIGNAL_0. */
2284 tp = find_thread_ptid (new_lp->ptid);
2285 if (tp != NULL && tp->stop_requested)
2286 new_lp->last_resume_kind = resume_stop;
2293 /* Add the new thread to GDB's lists as soon as possible
2296 1) the frontend doesn't have to wait for a stop to
2299 2) we tag it with the correct running state. */
2301 /* If the thread_db layer is active, let it know about
2302 this new thread, and add it to GDB's list. */
2303 if (!thread_db_attach_lwp (new_lp->ptid))
2305 /* We're not using thread_db. Add it to GDB's
2307 target_post_attach (GET_LWP (new_lp->ptid));
2308 add_thread (new_lp->ptid);
2313 set_running (new_lp->ptid, 1);
2314 set_executing (new_lp->ptid, 1);
2315 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2317 new_lp->last_resume_kind = resume_continue;
2323 /* We created NEW_LP so it cannot yet contain STATUS. */
2324 gdb_assert (new_lp->status == 0);
2326 /* Save the wait status to report later. */
2327 if (debug_linux_nat)
2328 fprintf_unfiltered (gdb_stdlog,
2329 "LHEW: waitpid of new LWP %ld, "
2330 "saving status %s\n",
2331 (long) GET_LWP (new_lp->ptid),
2332 status_to_str (status));
2333 new_lp->status = status;
2336 /* Note the need to use the low target ops to resume, to
2337 handle resuming with PT_SYSCALL if we have syscall
2341 new_lp->resumed = 1;
2345 gdb_assert (new_lp->last_resume_kind == resume_continue);
2346 if (debug_linux_nat)
2347 fprintf_unfiltered (gdb_stdlog,
2348 "LHEW: resuming new LWP %ld\n",
2349 GET_LWP (new_lp->ptid));
2350 if (linux_nat_prepare_to_resume != NULL)
2351 linux_nat_prepare_to_resume (new_lp);
2352 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2353 0, TARGET_SIGNAL_0);
2354 new_lp->stopped = 0;
2358 if (debug_linux_nat)
2359 fprintf_unfiltered (gdb_stdlog,
2360 "LHEW: resuming parent LWP %d\n", pid);
2361 if (linux_nat_prepare_to_resume != NULL)
2362 linux_nat_prepare_to_resume (lp);
2363 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2364 0, TARGET_SIGNAL_0);
2372 if (event == PTRACE_EVENT_EXEC)
2374 if (debug_linux_nat)
2375 fprintf_unfiltered (gdb_stdlog,
2376 "LHEW: Got exec event from LWP %ld\n",
2377 GET_LWP (lp->ptid));
2379 ourstatus->kind = TARGET_WAITKIND_EXECD;
2380 ourstatus->value.execd_pathname
2381 = xstrdup (linux_child_pid_to_exec_file (pid));
2386 if (event == PTRACE_EVENT_VFORK_DONE)
2388 if (current_inferior ()->waiting_for_vfork_done)
2390 if (debug_linux_nat)
2391 fprintf_unfiltered (gdb_stdlog,
2392 "LHEW: Got expected PTRACE_EVENT_"
2393 "VFORK_DONE from LWP %ld: stopping\n",
2394 GET_LWP (lp->ptid));
2396 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2400 if (debug_linux_nat)
2401 fprintf_unfiltered (gdb_stdlog,
2402 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2403 "from LWP %ld: resuming\n",
2404 GET_LWP (lp->ptid));
2405 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2409 internal_error (__FILE__, __LINE__,
2410 _("unknown ptrace event %d"), event);
2413 /* Return non-zero if LWP is a zombie. */
/* NOTE(review): this listing is an elided dump; the embedded original
   line numbers show gaps where declarations, braces, the fclose call
   and the final return were dropped.  Code bytes below are untouched.  */
2416 linux_lwp_is_zombie (long lwp)
2418 char buffer[MAXPATHLEN];
/* Build the /proc/<lwp>/status path and open it for reading.  */
2423 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
2424 procfile = fopen (buffer, "r");
2425 if (procfile == NULL)
2427 warning (_("unable to open /proc file '%s'"), buffer);
/* Scan for the "State:" line; BUFFER is reused for each line read.  */
2432 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2433 if (strncmp (buffer, "State:", 6) == 0)
/* Zombie iff a State: line was found and it matches the kernel's
   exact zombie marker text.  */
2438 retval = (have_state
2439 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
2444 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
/* NOTE(review): elided dump — gaps in the embedded numbering mark
   missing original lines (the "static int" line, braces, the loop
   construct around the waitpid retries, etc.).  Code bytes untouched.  */
2448 wait_lwp (struct lwp_info *lp)
2452 int thread_dead = 0;
/* Preconditions: caller must only wait for a running LWP with no
   event already pending.  */
2455 gdb_assert (!lp->stopped);
2456 gdb_assert (lp->status == 0);
2458 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2459 block_child_signals (&prev_mask);
2463 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2464 was right and we should just call sigsuspend. */
/* Try both waitpid flavors: plain for the leader, __WCLONE for
   cloned (non-leader) LWPs.  ECHILD from both means it is gone.  */
2466 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2467 if (pid == -1 && errno == ECHILD)
2468 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2469 if (pid == -1 && errno == ECHILD)
2471 /* The thread has previously exited. We need to delete it
2472 now because, for some vendor 2.4 kernels with NPTL
2473 support backported, there won't be an exit event unless
2474 it is the main thread. 2.6 kernels will report an exit
2475 event for each thread that exits, as expected. */
2477 if (debug_linux_nat)
2478 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2479 target_pid_to_str (lp->ptid));
2484 /* Bugs 10970, 12702.
2485 Thread group leader may have exited in which case we'll lock up in
2486 waitpid if there are other threads, even if they are all zombies too.
2487 Basically, we're not supposed to use waitpid this way.
2488 __WCLONE is not applicable for the leader so we can't use that.
2489 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2490 process; it gets ESRCH both for the zombie and for running processes.
2492 As a workaround, check if we're waiting for the thread group leader and
2493 if it's a zombie, and avoid calling waitpid if it is.
2495 This is racy, what if the tgl becomes a zombie right after we check?
2496 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2497 waiting waitpid but the linux_lwp_is_zombie is safe this way. */
/* PID == LWP id means this is the thread-group leader.  */
2499 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2500 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
2503 if (debug_linux_nat)
2504 fprintf_unfiltered (gdb_stdlog,
2505 "WL: Thread group leader %s vanished.\n",
2506 target_pid_to_str (lp->ptid));
2510 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2511 get invoked despite our caller had them intentionally blocked by
2512 block_child_signals. This is sensitive only to the loop of
2513 linux_nat_wait_1 and there if we get called my_waitpid gets called
2514 again before it gets to sigsuspend so we can safely let the handlers
2515 get executed here. */
2517 sigsuspend (&suspend_mask);
2520 restore_child_signals_mask (&prev_mask);
2524 gdb_assert (pid == GET_LWP (lp->ptid));
2526 if (debug_linux_nat)
2528 fprintf_unfiltered (gdb_stdlog,
2529 "WL: waitpid %s received %s\n",
2530 target_pid_to_str (lp->ptid),
2531 status_to_str (status));
2534 /* Check if the thread has exited. */
2535 if (WIFEXITED (status) || WIFSIGNALED (status))
2538 if (debug_linux_nat)
2539 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2540 target_pid_to_str (lp->ptid));
/* From here on the LWP is known to be stopped by a signal.  */
2550 gdb_assert (WIFSTOPPED (status));
2552 /* Handle GNU/Linux's syscall SIGTRAPs. */
2553 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2555 /* No longer need the sysgood bit. The ptrace event ends up
2556 recorded in lp->waitstatus if we care for it. We can carry
2557 on handling the event like a regular SIGTRAP from here
2559 status = W_STOPCODE (SIGTRAP);
/* If the syscall trap was consumed internally, wait again.  */
2560 if (linux_handle_syscall_trap (lp, 1))
2561 return wait_lwp (lp);
2564 /* Handle GNU/Linux's extended waitstatus for trace events. */
2565 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2567 if (debug_linux_nat)
2568 fprintf_unfiltered (gdb_stdlog,
2569 "WL: Handling extended status 0x%06x\n",
2571 if (linux_handle_extended_wait (lp, status, 1))
2572 return wait_lwp (lp);
2578 /* Save the most recent siginfo for LP. This is currently only called
2579 for SIGTRAP; some ports use the si_addr field for
2580 target_stopped_data_address. In the future, it may also be used to
2581 restore the siginfo of requeued signals. */
/* NOTE(review): elided dump — the return-type line, braces, and the
   errno check that selects between the two statements below are
   missing from this listing.  */
2584 save_siginfo (struct lwp_info *lp)
/* Fetch the LWP's current siginfo into the lwp_info cache ...  */
2587 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2588 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
/* ... otherwise (on failure, presumably) clear the cached siginfo.  */
2591 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2594 /* Send a SIGSTOP to LP. */
/* Iterator callback: ask the LWP to stop by sending it SIGSTOP,
   unless it is already stopped or already has a stop signalled.
   NOTE(review): elided dump — braces, the errno reset, the
   lp->signalled assignment and the return are not shown.  */
2597 stop_callback (struct lwp_info *lp, void *data)
2599 if (!lp->stopped && !lp->signalled)
2603 if (debug_linux_nat)
2605 fprintf_unfiltered (gdb_stdlog,
2606 "SC: kill %s **<SIGSTOP>**\n",
2607 target_pid_to_str (lp->ptid));
/* kill_lwp targets the specific LWP (tkill/tgkill), not the group.  */
2610 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2611 if (debug_linux_nat)
2613 fprintf_unfiltered (gdb_stdlog,
2614 "SC: lwp kill %d %s\n",
2616 errno ? safe_strerror (errno) : "ERRNO-OK");
/* An LWP we signal here must not already have a pending status.  */
2620 gdb_assert (lp->status == 0);
2626 /* Request a stop on LWP. */
/* Thin public wrapper over stop_callback for a single LWP.  */
2629 linux_stop_lwp (struct lwp_info *lwp)
2631 stop_callback (lwp, NULL);
2634 /* Return non-zero if LWP PID has a pending SIGINT. */
/* NOTE(review): elided dump — the return statements for both the
   true and false cases are missing from this listing.  */
2637 linux_nat_has_pending_sigint (int pid)
2639 sigset_t pending, blocked, ignored;
/* Query the kernel's per-thread signal sets via /proc.  */
2641 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
/* A pending-but-ignored SIGINT does not count.  */
2643 if (sigismember (&pending, SIGINT)
2644 && !sigismember (&ignored, SIGINT))
2650 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
/* Iterator callback; DATA is unused.  NOTE(review): elided dump —
   the branch that consumes an already-pending SIGINT (clearing
   lp->status) is missing between the condition and the else-arm.  */
2653 set_ignore_sigint (struct lwp_info *lp, void *data)
2655 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2656 flag to consume the next one. */
2657 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2658 && WSTOPSIG (lp->status) == SIGINT)
2661 lp->ignore_sigint = 1;
2666 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2667 This function is called after we know the LWP has stopped; if the LWP
2668 stopped before the expected SIGINT was delivered, then it will never have
2669 arrived. Also, if the signal was delivered to a shared queue and consumed
2670 by a different thread, it will never be delivered to this LWP. */
2673 maybe_clear_ignore_sigint (struct lwp_info *lp)
/* Fast exit when there is nothing to clear.  */
2675 if (!lp->ignore_sigint)
/* Kernel says no SIGINT is actually pending: the flag is stale.  */
2678 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2680 if (debug_linux_nat)
2681 fprintf_unfiltered (gdb_stdlog,
2682 "MCIS: Clearing bogus flag for %s\n",
2683 target_pid_to_str (lp->ptid));
2684 lp->ignore_sigint = 0;
2688 /* Fetch the possible triggered data watchpoint info and store it in
2691 On some archs, like x86, that use debug registers to set
2692 watchpoints, it's possible that the way to know which watched
2693 address trapped, is to check the register that is used to select
2694 which address to watch. Problem is, between setting the watchpoint
2695 and reading back which data address trapped, the user may change
2696 the set of watchpoints, and, as a consequence, GDB changes the
2697 debug registers in the inferior. To avoid reading back a stale
2698 stopped-data-address when that happens, we cache in LP the fact
2699 that a watchpoint trapped, and the corresponding data address, as
2700 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2701 registers meanwhile, we have the cached data we can rely on. */
2704 save_sigtrap (struct lwp_info *lp)
2706 struct cleanup *old_chain;
/* No low-target watchpoint support: record "not a watchpoint stop".  */
2708 if (linux_ops->to_stopped_by_watchpoint == NULL)
2710 lp->stopped_by_watchpoint = 0;
/* Temporarily switch inferior_ptid to LP so the low target queries
   the right thread; restored by do_cleanups below.  */
2714 old_chain = save_inferior_ptid ();
2715 inferior_ptid = lp->ptid;
2717 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2719 if (lp->stopped_by_watchpoint)
2721 if (linux_ops->to_stopped_data_address != NULL)
2722 lp->stopped_data_address_p =
/* NOTE(review): "¤t_target" below is mojibake for
   "&current_target" (an HTML entity decoded during extraction);
   restore the original spelling when reconstructing this file.  */
2723 linux_ops->to_stopped_data_address (¤t_target,
2724 &lp->stopped_data_address);
2726 lp->stopped_data_address_p = 0;
2729 do_cleanups (old_chain);
2732 /* See save_sigtrap. */
/* Target hook: report the cached watchpoint-stop flag for the
   current thread (inferior_ptid).  */
2735 linux_nat_stopped_by_watchpoint (void)
2737 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2739 gdb_assert (lp != NULL);
2741 return lp->stopped_by_watchpoint;
/* Target hook: return the cached trapped data address for the current
   thread in *ADDR_P; the return value says whether the address is
   valid (see save_sigtrap for how both are cached).  */
2745 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2747 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2749 gdb_assert (lp != NULL);
2751 *addr_p = lp->stopped_data_address;
2753 return lp->stopped_data_address_p;
/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */

/* Default SIGTRAP-like event recognizer: return non-zero iff STATUS
   is a wait status describing a stop by SIGTRAP.  Installed as the
   initial value of linux_nat_status_is_event; targets may override it
   via linux_nat_set_status_is_event.  (This listing had dropped the
   declarator line and braces; they are restored here.)  */

static int
sigtrap_is_event (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
}
2764 /* SIGTRAP-like events recognizer. */
/* Function pointer so low targets can install an alternative
   recognizer (see linux_nat_set_status_is_event).  Defaults to the
   plain SIGTRAP check.  */
2766 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2768 /* Check for SIGTRAP-like events in LP. */
2771 linux_nat_lp_status_is_event (struct lwp_info *lp)
2773 /* We check for lp->waitstatus in addition to lp->status, because we can
2774 have pending process exits recorded in lp->status
2775 and W_EXITCODE(0,0) == 0. We should probably have an additional
2776 lp->status_p flag. */
/* Only a raw lp->status (no higher-level waitstatus pending) can be a
   SIGTRAP-like event.  */
2778 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2779 && linux_nat_status_is_event (lp->status))
2782 /* Set alternative SIGTRAP-like events recognizer. If
2783 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
/* Public setter used by low targets; T is unused here, kept for the
   conventional set-hook signature.  */
2787 linux_nat_set_status_is_event (struct target_ops *t,
2788 int (*status_is_event) (int status))
2790 linux_nat_status_is_event = status_is_event;
2793 /* Wait until LP is stopped. */
/* NOTE(review): elided dump — braces, the !lp->stopped guard, errno
   resets and several control-flow lines are missing (see gaps in the
   embedded numbering).  Code bytes below are untouched.  */
2796 stop_wait_callback (struct lwp_info *lp, void *data)
2798 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2800 /* If this is a vfork parent, bail out, it is not going to report
2801 any SIGSTOP until the vfork is done with. */
2802 if (inf->vfork_child != NULL)
/* Block until the LWP reports something.  */
2809 status = wait_lwp (lp);
/* First intercept a SIGINT we asked to ignore: discard it and keep
   waiting for the SIGSTOP.  */
2813 if (lp->ignore_sigint && WIFSTOPPED (status)
2814 && WSTOPSIG (status) == SIGINT)
2816 lp->ignore_sigint = 0;
2819 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2820 if (debug_linux_nat)
2821 fprintf_unfiltered (gdb_stdlog,
2822 "PTRACE_CONT %s, 0, 0 (%s) "
2823 "(discarding SIGINT)\n",
2824 target_pid_to_str (lp->ptid),
2825 errno ? safe_strerror (errno) : "OK");
2827 return stop_wait_callback (lp, NULL);
2830 maybe_clear_ignore_sigint (lp);
/* Anything other than our expected SIGSTOP needs special handling.  */
2832 if (WSTOPSIG (status) != SIGSTOP)
2834 if (linux_nat_status_is_event (status))
2836 /* If a LWP other than the LWP that we're reporting an
2837 event for has hit a GDB breakpoint (as opposed to
2838 some random trap signal), then just arrange for it to
2839 hit it again later. We don't keep the SIGTRAP status
2840 and don't forward the SIGTRAP signal to the LWP. We
2841 will handle the current event, eventually we will
2842 resume all LWPs, and this one will get its breakpoint
2845 If we do not do this, then we run the risk that the
2846 user will delete or disable the breakpoint, but the
2847 thread will have already tripped on it. */
2849 /* Save the trap's siginfo in case we need it later. */
2854 /* Now resume this LWP and get the SIGSTOP event. */
2856 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2857 if (debug_linux_nat)
2859 fprintf_unfiltered (gdb_stdlog,
2860 "PTRACE_CONT %s, 0, 0 (%s)\n",
2861 target_pid_to_str (lp->ptid),
2862 errno ? safe_strerror (errno) : "OK");
2864 fprintf_unfiltered (gdb_stdlog,
2865 "SWC: Candidate SIGTRAP event in %s\n",
2866 target_pid_to_str (lp->ptid));
2868 /* Hold this event/waitstatus while we check to see if
2869 there are any more (we still want to get that SIGSTOP). */
2870 stop_wait_callback (lp, NULL);
2872 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2873 there's another event, throw it back into the
2877 if (debug_linux_nat)
2878 fprintf_unfiltered (gdb_stdlog,
2879 "SWC: kill %s, %s\n",
2880 target_pid_to_str (lp->ptid),
2881 status_to_str ((int) status));
2882 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2885 /* Save the sigtrap event. */
2886 lp->status = status;
2891 /* The thread was stopped with a signal other than
2892 SIGSTOP, and didn't accidentally trip a breakpoint. */
2894 if (debug_linux_nat)
2896 fprintf_unfiltered (gdb_stdlog,
2897 "SWC: Pending event %s in %s\n",
2898 status_to_str ((int) status),
2899 target_pid_to_str (lp->ptid));
2901 /* Now resume this LWP and get the SIGSTOP event. */
2903 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2904 if (debug_linux_nat)
2905 fprintf_unfiltered (gdb_stdlog,
2906 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2907 target_pid_to_str (lp->ptid),
2908 errno ? safe_strerror (errno) : "OK");
2910 /* Hold this event/waitstatus while we check to see if
2911 there are any more (we still want to get that SIGSTOP). */
2912 stop_wait_callback (lp, NULL);
2914 /* If the lp->status field is still empty, use it to
2915 hold this event. If not, then this event must be
2916 returned to the event queue of the LWP. */
2919 if (debug_linux_nat)
2921 fprintf_unfiltered (gdb_stdlog,
2922 "SWC: kill %s, %s\n",
2923 target_pid_to_str (lp->ptid),
2924 status_to_str ((int) status));
2926 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2929 lp->status = status;
2935 /* We caught the SIGSTOP that we intended to catch, so
2936 there's no SIGSTOP pending. */
2945 /* Return non-zero if LP has a wait status pending. */
/* Iterator callback; DATA is unused.  NOTE(review): elided dump —
   the lp->resumed guard and the return statements are missing.  */
2948 status_callback (struct lwp_info *lp, void *data)
2950 /* Only report a pending wait status if we pretend that this has
2951 indeed been resumed. */
2955 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2957 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2958 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2959 0', so a clean process exit can not be stored pending in
2960 lp->status, it is indistinguishable from
2961 no-pending-status. */
2965 if (lp->status != 0)
2971 /* Return non-zero if LP isn't stopped. */
/* A stopped LWP with a pending event still counts as "running" for
   this predicate, per the expression below (tail of the condition is
   elided in this listing).  */
2974 running_callback (struct lwp_info *lp, void *data)
2976 return (!lp->stopped
2977 || ((lp->status != 0
2978 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2982 /* Count the LWP's that have had events. */
/* Iterator callback; DATA points to the running counter (declaration
   of COUNT is elided in this listing).  */
2985 count_events_callback (struct lwp_info *lp, void *data)
2989 gdb_assert (count != NULL);
2991 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2992 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2998 /* Select the LWP (if any) that is currently being single-stepped. */
/* Iterator callback; the second half of the condition and the return
   statements are elided in this listing.  */
3001 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3003 if (lp->last_resume_kind == resume_step
3010 /* Select the Nth LWP that has had a SIGTRAP event. */
/* Iterator callback; DATA is a countdown selector chosen at random by
   select_event_lwp.  Returns (elided here) non-zero on the match.  */
3013 select_event_lwp_callback (struct lwp_info *lp, void *data)
3015 int *selector = data;
3017 gdb_assert (selector != NULL);
3019 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3020 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3021 if ((*selector)-- == 0)
/* Return non-zero (elided in this listing) if LP stopped at a GDB
   breakpoint, after rewinding its PC so the breakpoint will re-trap
   on the next resume.  */
3028 cancel_breakpoint (struct lwp_info *lp)
3030 /* Arrange for a breakpoint to be hit again later. We don't keep
3031 the SIGTRAP status and don't forward the SIGTRAP signal to the
3032 LWP. We will handle the current event, eventually we will resume
3033 this LWP, and this breakpoint will trap again.
3035 If we do not do this, then we run the risk that the user will
3036 delete or disable the breakpoint, but the LWP will have already
3039 struct regcache *regcache = get_thread_regcache (lp->ptid);
3040 struct gdbarch *gdbarch = get_regcache_arch (regcache);
/* Compute the breakpoint address: current PC minus the architecture's
   post-break PC adjustment.  */
3043 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3044 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3046 if (debug_linux_nat)
3047 fprintf_unfiltered (gdb_stdlog,
3048 "CB: Push back breakpoint for %s\n",
3049 target_pid_to_str (lp->ptid));
3051 /* Back up the PC if necessary. */
3052 if (gdbarch_decr_pc_after_break (gdbarch))
3053 regcache_write_pc (regcache, pc);
/* Iterator callback: cancel breakpoint hits in every LWP except the
   one elected to report the event (DATA).  Return value lines are
   elided in this listing.  */
3061 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3063 struct lwp_info *event_lp = data;
3065 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3069 /* If a LWP other than the LWP that we're reporting an event for has
3070 hit a GDB breakpoint (as opposed to some random trap signal),
3071 then just arrange for it to hit it again later. We don't keep
3072 the SIGTRAP status and don't forward the SIGTRAP signal to the
3073 LWP. We will handle the current event, eventually we will resume
3074 all LWPs, and this one will get its breakpoint trap again.
3076 If we do not do this, then we run the risk that the user will
3077 delete or disable the breakpoint, but the LWP will have already
3080 if (linux_nat_lp_status_is_event (lp)
3081 && cancel_breakpoint (lp))
3082 /* Throw away the SIGTRAP. */
3088 /* Select one LWP out of those that have events pending. */
/* On return, *ORIG_LP/*STATUS describe the elected event.  The
   declaration of num_events and some braces are elided here.  */
3091 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3094 int random_selector;
3095 struct lwp_info *event_lp;
3097 /* Record the wait status for the original LWP. */
3098 (*orig_lp)->status = *status;
3100 /* Give preference to any LWP that is being single-stepped. */
3101 event_lp = iterate_over_lwps (filter,
3102 select_singlestep_lwp_callback, NULL);
3103 if (event_lp != NULL)
3105 if (debug_linux_nat)
3106 fprintf_unfiltered (gdb_stdlog,
3107 "SEL: Select single-step %s\n",
3108 target_pid_to_str (event_lp->ptid));
3112 /* No single-stepping LWP. Select one at random, out of those
3113 which have had SIGTRAP events. */
3115 /* First see how many SIGTRAP events we have. */
3116 iterate_over_lwps (filter, count_events_callback, &num_events);
3118 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
/* Standard unbiased scaling of rand() into [0, num_events).  */
3119 random_selector = (int)
3120 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3122 if (debug_linux_nat && num_events > 1)
3123 fprintf_unfiltered (gdb_stdlog,
3124 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3125 num_events, random_selector);
3127 event_lp = iterate_over_lwps (filter,
3128 select_event_lwp_callback,
3132 if (event_lp != NULL)
3134 /* Switch the event LWP. */
3135 *orig_lp = event_lp;
3136 *status = event_lp->status;
3139 /* Flush the wait status for the event LWP. */
3140 (*orig_lp)->status = 0;
3143 /* Return non-zero if LP has been resumed. */
/* Iterator callback; body (elided in this listing) presumably returns
   lp->resumed — TODO confirm against the full source.  */
3146 resumed_callback (struct lwp_info *lp, void *data)
3151 /* Stop an active thread, verify it still exists, then resume it. If
3152 the thread ends up with a pending status, then it is not resumed,
3153 and *DATA (really a pointer to int), is set. */
/* NOTE(review): elided dump — braces, the lp->stopped guard and the
   *new_pending_p assignment are missing from this listing.  */
3156 stop_and_resume_callback (struct lwp_info *lp, void *data)
3158 int *new_pending_p = data;
/* Remember the ptid: the lwp_info may be deleted while we wait.  */
3162 ptid_t ptid = lp->ptid;
3164 stop_callback (lp, NULL);
3165 stop_wait_callback (lp, NULL);
3167 /* Resume if the lwp still exists, and the core wanted it
3169 lp = find_lwp_pid (ptid);
3172 if (lp->last_resume_kind == resume_stop
3175 /* The core wanted the LWP to stop. Even if it stopped
3176 cleanly (with SIGSTOP), leave the event pending. */
3177 if (debug_linux_nat)
3178 fprintf_unfiltered (gdb_stdlog,
3179 "SARC: core wanted LWP %ld stopped "
3180 "(leaving SIGSTOP pending)\n",
3181 GET_LWP (lp->ptid));
/* Synthesize the pending SIGSTOP event.  */
3182 lp->status = W_STOPCODE (SIGSTOP);
3185 if (lp->status == 0)
3187 if (debug_linux_nat)
3188 fprintf_unfiltered (gdb_stdlog,
3189 "SARC: re-resuming LWP %ld\n",
3190 GET_LWP (lp->ptid));
3191 resume_lwp (lp, lp->step);
3195 if (debug_linux_nat)
3196 fprintf_unfiltered (gdb_stdlog,
3197 "SARC: not re-resuming LWP %ld "
3199 GET_LWP (lp->ptid));
3208 /* Check if we should go on and pass this event to common code.
3209 Return the affected lwp if we are, or NULL otherwise. If we stop
3210 all lwps temporarily, we may end up with new pending events in some
3211 other lwp. In that case set *NEW_PENDING_P to true. */
/* NOTE(review): elided dump — braces, early "return NULL;" lines and
   a few statements (e.g. exit_lwp calls) are missing; the gaps in the
   embedded numbering mark where.  Code bytes below are untouched.  */
3213 static struct lwp_info *
3214 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3216 struct lwp_info *lp;
3220 lp = find_lwp_pid (pid_to_ptid (lwpid));
3222 /* Check for stop events reported by a process we didn't already
3223 know about - anything not already in our LWP list.
3225 If we're expecting to receive stopped processes after
3226 fork, vfork, and clone events, then we'll just add the
3227 new one to our list and go back to waiting for the event
3228 to be reported - the stopped process might be returned
3229 from waitpid before or after the event is.
3231 But note the case of a non-leader thread exec'ing after the
3232 leader having exited, and gone from our lists. The non-leader
3233 thread changes its tid to the tgid. */
3235 if (WIFSTOPPED (status) && lp == NULL
3236 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3238 /* A multi-thread exec after we had seen the leader exiting. */
3239 if (debug_linux_nat)
3240 fprintf_unfiltered (gdb_stdlog,
3241 "LLW: Re-adding thread group leader LWP %d.\n",
3244 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3247 add_thread (lp->ptid);
/* Stop from an LWP we don't know yet: stash it for later pairing
   with the fork/vfork/clone event.  */
3250 if (WIFSTOPPED (status) && !lp)
3252 add_to_pid_list (&stopped_pids, lwpid, status);
3256 /* Make sure we don't report an event for the exit of an LWP not in
3257 our list, i.e. not part of the current process. This can happen
3258 if we detach from a program we originally forked and then it
3260 if (!WIFSTOPPED (status) && !lp)
3263 /* Handle GNU/Linux's syscall SIGTRAPs. */
3264 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3266 /* No longer need the sysgood bit. The ptrace event ends up
3267 recorded in lp->waitstatus if we care for it. We can carry
3268 on handling the event like a regular SIGTRAP from here
3270 status = W_STOPCODE (SIGTRAP);
3271 if (linux_handle_syscall_trap (lp, 0))
3275 /* Handle GNU/Linux's extended waitstatus for trace events. */
3276 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3278 if (debug_linux_nat)
3279 fprintf_unfiltered (gdb_stdlog,
3280 "LLW: Handling extended status 0x%06x\n",
3282 if (linux_handle_extended_wait (lp, status, 0))
3286 if (linux_nat_status_is_event (status))
3288 /* Save the trap's siginfo in case we need it later. */
3294 /* Check if the thread has exited. */
3295 if ((WIFEXITED (status) || WIFSIGNALED (status))
3296 && num_lwps (GET_PID (lp->ptid)) > 1)
3298 /* If this is the main thread, we must stop all threads and verify
3299 if they are still alive. This is because in the nptl thread model
3300 on Linux 2.4, there is no signal issued for exiting LWPs
3301 other than the main thread. We only get the main thread exit
3302 signal once all child threads have already exited. If we
3303 stop all the threads and use the stop_wait_callback to check
3304 if they have exited we can determine whether this signal
3305 should be ignored or whether it means the end of the debugged
3306 application, regardless of which threading model is being
3308 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3311 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3312 stop_and_resume_callback, new_pending_p);
3315 if (debug_linux_nat)
3316 fprintf_unfiltered (gdb_stdlog,
3317 "LLW: %s exited.\n",
3318 target_pid_to_str (lp->ptid));
3320 if (num_lwps (GET_PID (lp->ptid)) > 1)
3322 /* If there is at least one more LWP, then the exit signal
3323 was not the end of the debugged application and should be
3330 /* Check if the current LWP has previously exited. In the nptl
3331 thread model, LWPs other than the main thread do not issue
3332 signals when they exit so we must check whenever the thread has
3333 stopped. A similar check is made in stop_wait_callback(). */
3334 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3336 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3338 if (debug_linux_nat)
3339 fprintf_unfiltered (gdb_stdlog,
3340 "LLW: %s exited.\n",
3341 target_pid_to_str (lp->ptid));
3345 /* Make sure there is at least one thread running. */
3346 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3348 /* Discard the event. */
3352 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3353 an attempt to stop an LWP. */
3355 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3357 if (debug_linux_nat)
3358 fprintf_unfiltered (gdb_stdlog,
3359 "LLW: Delayed SIGSTOP caught for %s.\n",
3360 target_pid_to_str (lp->ptid));
3364 if (lp->last_resume_kind != resume_stop)
3366 /* This is a delayed SIGSTOP. */
3368 registers_changed ();
3370 if (linux_nat_prepare_to_resume != NULL)
3371 linux_nat_prepare_to_resume (lp);
3372 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3373 lp->step, TARGET_SIGNAL_0);
3374 if (debug_linux_nat)
3375 fprintf_unfiltered (gdb_stdlog,
3376 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3378 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3379 target_pid_to_str (lp->ptid));
3382 gdb_assert (lp->resumed);
3384 /* Discard the event. */
3389 /* Make sure we don't report a SIGINT that we have already displayed
3390 for another thread. */
3391 if (lp->ignore_sigint
3392 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3394 if (debug_linux_nat)
3395 fprintf_unfiltered (gdb_stdlog,
3396 "LLW: Delayed SIGINT caught for %s.\n",
3397 target_pid_to_str (lp->ptid));
3399 /* This is a delayed SIGINT. */
3400 lp->ignore_sigint = 0;
3402 registers_changed ();
3403 if (linux_nat_prepare_to_resume != NULL)
3404 linux_nat_prepare_to_resume (lp);
3405 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3406 lp->step, TARGET_SIGNAL_0);
3407 if (debug_linux_nat)
3408 fprintf_unfiltered (gdb_stdlog,
3409 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3411 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3412 target_pid_to_str (lp->ptid));
3415 gdb_assert (lp->resumed);
3417 /* Discard the event. */
3421 /* An interesting event. */
/* Record it on the LWP; the caller reports it to the core.  */
3423 lp->status = status;
3427 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3428 their exits until all other threads in the group have exited. */
/* NOTE(review): elided dump — the ALL_INFERIORS loop construct,
   braces, and some argument lines of the fprintf calls are missing
   (see gaps in the embedded numbering).  */
3431 check_zombie_leaders (void)
3433 struct inferior *inf;
3437 struct lwp_info *leader_lp;
/* The leader's LWP id equals the inferior's pid.  */
3442 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3443 if (leader_lp != NULL
3444 /* Check if there are other threads in the group, as we may
3445 have raced with the inferior simply exiting. */
3446 && num_lwps (inf->pid) > 1
3447 && linux_lwp_is_zombie (inf->pid))
3449 if (debug_linux_nat)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "CZL: Thread group leader %d zombie "
3452 "(it exited, or another thread execd).\n",
3455 /* A leader zombie can mean one of two things:
3457 - It exited, and there's an exit status pending
3458 available, or only the leader exited (not the whole
3459 program). In the latter case, we can't waitpid the
3460 leader's exit status until all other threads are gone.
3462 - There are 3 or more threads in the group, and a thread
3463 other than the leader exec'd. On an exec, the Linux
3464 kernel destroys all other threads (except the execing
3465 one) in the thread group, and resets the execing thread's
3466 tid to the tgid. No exit notification is sent for the
3467 execing thread -- from the ptracer's perspective, it
3468 appears as though the execing thread just vanishes.
3469 Until we reap all other threads except the leader and the
3470 execing thread, the leader will be zombie, and the
3471 execing thread will be in `D (disc sleep)'. As soon as
3472 all other threads are reaped, the execing thread changes
3473 it's tid to the tgid, and the previous (zombie) leader
3474 vanishes, giving place to the "new" leader. We could try
3475 distinguishing the exit and exec cases, by waiting once
3476 more, and seeing if something comes out, but it doesn't
3477 sound useful. The previous leader _does_ go away, and
3478 we'll re-add the new one once we see the exec event
3479 (which is just the same as what would happen if the
3480 previous leader did exit voluntarily before some other
3483 if (debug_linux_nat)
3484 fprintf_unfiltered (gdb_stdlog,
3485 "CZL: Thread group leader %d vanished.\n",
3487 exit_lwp (leader_lp);
3493 linux_nat_wait_1 (struct target_ops *ops,
3494 ptid_t ptid, struct target_waitstatus *ourstatus,
3497 static sigset_t prev_mask;
3498 enum resume_kind last_resume_kind;
3499 struct lwp_info *lp;
3502 if (debug_linux_nat)
3503 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3505 /* The first time we get here after starting a new inferior, we may
3506 not have added it to the LWP list yet - this is the earliest
3507 moment at which we know its PID. */
3508 if (ptid_is_pid (inferior_ptid))
3510 /* Upgrade the main thread's ptid. */
3511 thread_change_ptid (inferior_ptid,
3512 BUILD_LWP (GET_PID (inferior_ptid),
3513 GET_PID (inferior_ptid)));
3515 lp = add_lwp (inferior_ptid);
3519 /* Make sure SIGCHLD is blocked. */
3520 block_child_signals (&prev_mask);
3526 /* First check if there is a LWP with a wait status pending. */
3527 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3529 /* Any LWP in the PTID group that's been resumed will do. */
3530 lp = iterate_over_lwps (ptid, status_callback, NULL);
3533 if (debug_linux_nat && lp->status)
3534 fprintf_unfiltered (gdb_stdlog,
3535 "LLW: Using pending wait status %s for %s.\n",
3536 status_to_str (lp->status),
3537 target_pid_to_str (lp->ptid));
3540 else if (is_lwp (ptid))
3542 if (debug_linux_nat)
3543 fprintf_unfiltered (gdb_stdlog,
3544 "LLW: Waiting for specific LWP %s.\n",
3545 target_pid_to_str (ptid));
3547 /* We have a specific LWP to check. */
3548 lp = find_lwp_pid (ptid);
3551 if (debug_linux_nat && lp->status)
3552 fprintf_unfiltered (gdb_stdlog,
3553 "LLW: Using pending wait status %s for %s.\n",
3554 status_to_str (lp->status),
3555 target_pid_to_str (lp->ptid));
3557 /* We check for lp->waitstatus in addition to lp->status,
3558 because we can have pending process exits recorded in
3559 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3560 an additional lp->status_p flag. */
3561 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3565 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
3567 /* A pending SIGSTOP may interfere with the normal stream of
3568 events. In a typical case where interference is a problem,
3569 we have a SIGSTOP signal pending for LWP A while
3570 single-stepping it, encounter an event in LWP B, and take the
3571 pending SIGSTOP while trying to stop LWP A. After processing
3572 the event in LWP B, LWP A is continued, and we'll never see
3573 the SIGTRAP associated with the last time we were
3574 single-stepping LWP A. */
3576 /* Resume the thread. It should halt immediately returning the
3578 registers_changed ();
3579 if (linux_nat_prepare_to_resume != NULL)
3580 linux_nat_prepare_to_resume (lp);
3581 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3582 lp->step, TARGET_SIGNAL_0);
3583 if (debug_linux_nat)
3584 fprintf_unfiltered (gdb_stdlog,
3585 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3586 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3587 target_pid_to_str (lp->ptid));
3589 gdb_assert (lp->resumed);
3591 /* Catch the pending SIGSTOP. */
3592 status = lp->status;
3595 stop_wait_callback (lp, NULL);
3597 /* If the lp->status field isn't empty, we caught another signal
3598 while flushing the SIGSTOP. Return it back to the event
3599 queue of the LWP, as we already have an event to handle. */
3602 if (debug_linux_nat)
3603 fprintf_unfiltered (gdb_stdlog,
3604 "LLW: kill %s, %s\n",
3605 target_pid_to_str (lp->ptid),
3606 status_to_str (lp->status));
3607 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3610 lp->status = status;
3613 if (!target_can_async_p ())
3615 /* Causes SIGINT to be passed on to the attached process. */
3619 /* But if we don't find a pending event, we'll have to wait. */
3625 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3628 - If the thread group leader exits while other threads in the
3629 thread group still exist, waitpid(TGID, ...) hangs. That
3630 waitpid won't return an exit status until the other threads
3631 in the group are reapped.
3633 - When a non-leader thread execs, that thread just vanishes
3634 without reporting an exit (so we'd hang if we waited for it
3635 explicitly in that case). The exec event is reported to
3639 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3640 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3641 lwpid = my_waitpid (-1, &status, WNOHANG);
3643 if (debug_linux_nat)
3644 fprintf_unfiltered (gdb_stdlog,
3645 "LNW: waitpid(-1, ...) returned %d, %s\n",
3646 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3650 /* If this is true, then we paused LWPs momentarily, and may
3651 now have pending events to handle. */
3654 if (debug_linux_nat)
3656 fprintf_unfiltered (gdb_stdlog,
3657 "LLW: waitpid %ld received %s\n",
3658 (long) lwpid, status_to_str (status));
3661 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3663 /* STATUS is now no longer valid, use LP->STATUS instead. */
3666 if (lp && !ptid_match (lp->ptid, ptid))
3668 gdb_assert (lp->resumed);
3670 if (debug_linux_nat)
3672 "LWP %ld got an event %06x, leaving pending.\n",
3673 ptid_get_lwp (lp->ptid), lp->status);
3675 if (WIFSTOPPED (lp->status))
3677 if (WSTOPSIG (lp->status) != SIGSTOP)
3679 /* Cancel breakpoint hits. The breakpoint may
3680 be removed before we fetch events from this
3681 process to report to the core. It is best
3682 not to assume the moribund breakpoints
3683 heuristic always handles these cases --- it
3684 could be too many events go through to the
3685 core before this one is handled. All-stop
3686 always cancels breakpoint hits in all
3689 && linux_nat_lp_status_is_event (lp)
3690 && cancel_breakpoint (lp))
3692 /* Throw away the SIGTRAP. */
3695 if (debug_linux_nat)
3697 "LLW: LWP %ld hit a breakpoint while"
3698 " waiting for another process;"
3700 ptid_get_lwp (lp->ptid));
3710 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3712 if (debug_linux_nat)
3714 "Process %ld exited while stopping LWPs\n",
3715 ptid_get_lwp (lp->ptid));
3717 /* This was the last lwp in the process. Since
3718 events are serialized to GDB core, and we can't
3719 report this one right now, but GDB core and the
3720 other target layers will want to be notified
3721 about the exit code/signal, leave the status
3722 pending for the next time we're able to report
3725 /* Prevent trying to stop this thread again. We'll
3726 never try to resume it because it has a pending
3730 /* Dead LWP's aren't expected to report a pending
3734 /* Store the pending event in the waitstatus as
3735 well, because W_EXITCODE(0,0) == 0. */
3736 store_waitstatus (&lp->waitstatus, lp->status);
3745 /* Some LWP now has a pending event. Go all the way
3746 back to check it. */
3752 /* We got an event to report to the core. */
3756 /* Retry until nothing comes out of waitpid. A single
3757 SIGCHLD can indicate more than one child stopped. */
3761 /* Check for zombie thread group leaders. Those can't be reaped
3762 until all other threads in the thread group are. */
3763 check_zombie_leaders ();
3765 /* If there are no resumed children left, bail. We'd be stuck
3766 forever in the sigsuspend call below otherwise. */
3767 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3769 if (debug_linux_nat)
3770 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3772 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3774 if (!target_can_async_p ())
3775 clear_sigint_trap ();
3777 restore_child_signals_mask (&prev_mask);
3778 return minus_one_ptid;
3781 /* No interesting event to report to the core. */
3783 if (target_options & TARGET_WNOHANG)
3785 if (debug_linux_nat)
3786 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3788 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3789 restore_child_signals_mask (&prev_mask);
3790 return minus_one_ptid;
3793 /* We shouldn't end up here unless we want to try again. */
3794 gdb_assert (lp == NULL);
3796 /* Block until we get an event reported with SIGCHLD. */
3797 sigsuspend (&suspend_mask);
3800 if (!target_can_async_p ())
3801 clear_sigint_trap ();
3805 status = lp->status;
3808 /* Don't report signals that GDB isn't interested in, such as
3809 signals that are neither printed nor stopped upon. Stopping all
3810 threads can be a bit time-consuming so if we want decent
3811 performance with heavily multi-threaded programs, especially when
3812 they're using a high frequency timer, we'd better avoid it if we
3815 if (WIFSTOPPED (status))
3817 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
3819 /* When using hardware single-step, we need to report every signal.
3820 Otherwise, signals in pass_mask may be short-circuited. */
3822 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3824 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3825 here? It is not clear we should. GDB may not expect
3826 other threads to run. On the other hand, not resuming
3827 newly attached threads may cause an unwanted delay in
3828 getting them running. */
3829 registers_changed ();
3830 if (linux_nat_prepare_to_resume != NULL)
3831 linux_nat_prepare_to_resume (lp);
3832 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3834 if (debug_linux_nat)
3835 fprintf_unfiltered (gdb_stdlog,
3836 "LLW: %s %s, %s (preempt 'handle')\n",
3838 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3839 target_pid_to_str (lp->ptid),
3840 (signo != TARGET_SIGNAL_0
3841 ? strsignal (target_signal_to_host (signo))
3849 /* Only do the below in all-stop, as we currently use SIGINT
3850 to implement target_stop (see linux_nat_stop) in
3852 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3854 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3855 forwarded to the entire process group, that is, all LWPs
3856 will receive it - unless they're using CLONE_THREAD to
3857 share signals. Since we only want to report it once, we
3858 mark it as ignored for all LWPs except this one. */
3859 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3860 set_ignore_sigint, NULL);
3861 lp->ignore_sigint = 0;
3864 maybe_clear_ignore_sigint (lp);
3868 /* This LWP is stopped now. */
3871 if (debug_linux_nat)
3872 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3873 status_to_str (status), target_pid_to_str (lp->ptid));
3877 /* Now stop all other LWP's ... */
3878 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3880 /* ... and wait until all of them have reported back that
3881 they're no longer running. */
3882 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3884 /* If we're not waiting for a specific LWP, choose an event LWP
3885 from among those that have had events. Giving equal priority
3886 to all LWPs that have had events helps prevent
3888 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3889 select_event_lwp (ptid, &lp, &status);
3891 /* Now that we've selected our final event LWP, cancel any
3892 breakpoints in other LWPs that have hit a GDB breakpoint.
3893 See the comment in cancel_breakpoints_callback to find out
3895 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3897 /* We'll need this to determine whether to report a SIGSTOP as
3898 TARGET_WAITKIND_0. Need to take a copy because
3899 resume_clear_callback clears it. */
3900 last_resume_kind = lp->last_resume_kind;
3902 /* In all-stop, from the core's perspective, all LWPs are now
3903 stopped until a new resume action is sent over. */
3904 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3909 last_resume_kind = lp->last_resume_kind;
3910 resume_clear_callback (lp, NULL);
3913 if (linux_nat_status_is_event (status))
3915 if (debug_linux_nat)
3916 fprintf_unfiltered (gdb_stdlog,
3917 "LLW: trap ptid is %s.\n",
3918 target_pid_to_str (lp->ptid));
3921 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3923 *ourstatus = lp->waitstatus;
3924 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3927 store_waitstatus (ourstatus, status);
3929 if (debug_linux_nat)
3930 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3932 restore_child_signals_mask (&prev_mask);
3934 if (last_resume_kind == resume_stop
3935 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3936 && WSTOPSIG (status) == SIGSTOP)
3938 /* A thread that has been requested to stop by GDB with
3939 target_stop, and it stopped cleanly, so report as SIG0. The
3940 use of SIGSTOP is an implementation detail. */
3941 ourstatus->value.sig = TARGET_SIGNAL_0;
3944 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3945 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3948 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3953 /* Resume LWPs that are currently stopped without any pending status
3954 to report, but are resumed from the core's perspective. */
/* Callback for iterate_over_lwps.  DATA points to the ptid the current
   target_wait call is waiting on; presumably returns 0 so iteration
   visits every LWP — TODO confirm (return statement not visible here).  */
3957 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3959 ptid_t *wait_ptid_p = data;
3964 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3966 struct regcache *regcache = get_thread_regcache (lp->ptid);
3967 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3968 CORE_ADDR pc = regcache_read_pc (regcache);
3970 gdb_assert (is_executing (lp->ptid));
3972 /* Don't bother if there's a breakpoint at PC that we'd hit
3973 immediately, and we're not waiting for this LWP. */
3974 if (!ptid_match (lp->ptid, *wait_ptid_p))
3976 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3980 if (debug_linux_nat)
3981 fprintf_unfiltered (gdb_stdlog,
3982 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3983 target_pid_to_str (lp->ptid),
3984 paddress (gdbarch, pc),
/* Flush the register caches before letting the low-level target
   actually resume the LWP.  */
3987 registers_changed ();
3988 if (linux_nat_prepare_to_resume != NULL)
3989 linux_nat_prepare_to_resume (lp);
3990 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3991 lp->step, TARGET_SIGNAL_0);
/* Forget per-stop state now that the LWP is running again.  */
3993 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3994 lp->stopped_by_watchpoint = 0;
/* Target-vector "wait" entry point for the Linux native target.
   Flushes the async event pipe, resumes stopped-but-resumed LWPs,
   delegates to linux_nat_wait_1, and re-arms async notification.  */
4001 linux_nat_wait (struct target_ops *ops,
4002 ptid_t ptid, struct target_waitstatus *ourstatus,
4007 if (debug_linux_nat)
4008 fprintf_unfiltered (gdb_stdlog,
4009 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
4011 /* Flush the async file first. */
4012 if (target_can_async_p ())
4013 async_file_flush ();
4015 /* Resume LWPs that are currently stopped without any pending status
4016 to report, but are resumed from the core's perspective. LWPs get
4017 in this state if we find them stopping at a time we're not
4018 interested in reporting the event (target_wait on a
4019 specific_process, for example, see linux_nat_wait_1), and
4020 meanwhile the event became uninteresting. Don't bother resuming
4021 LWPs we're not going to wait for if they'd stop immediately. */
4023 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4025 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
4027 /* If we requested any event, and something came out, assume there
4028 may be more. If we requested a specific lwp or process, also
4029 assume there may be more. */
4030 if (target_can_async_p ()
4031 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4032 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
4033 || !ptid_equal (ptid, minus_one_ptid)))
4036 /* Get ready for the next event. */
4037 if (target_can_async_p ())
4038 target_async (inferior_event_handler, 0);
/* Callback for iterate_over_lwps when killing the inferior: deliver
   SIGKILL to LWP and follow up with PTRACE_KILL.  */
4044 kill_callback (struct lwp_info *lp, void *data)
4046 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4049 kill (GET_LWP (lp->ptid), SIGKILL);
4050 if (debug_linux_nat)
4051 fprintf_unfiltered (gdb_stdlog,
4052 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4053 target_pid_to_str (lp->ptid),
4054 errno ? safe_strerror (errno) : "OK");
4056 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4059 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4060 if (debug_linux_nat)
4061 fprintf_unfiltered (gdb_stdlog,
4062 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4063 target_pid_to_str (lp->ptid),
4064 errno ? safe_strerror (errno) : "OK");
/* Callback for iterate_over_lwps: reap every wait status of LWP after
   it has been killed, re-killing it if the kernel keeps it alive.  */
4070 kill_wait_callback (struct lwp_info *lp, void *data)
4074 /* We must make sure that there are no pending events (delayed
4075 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4076 program doesn't interfere with any following debugging session. */
4078 /* For cloned processes we must check both with __WCLONE and
4079 without, since the exit status of a cloned process isn't reported
/* First drain events from clone LWPs (__WCLONE).  */
4085 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4086 if (pid != (pid_t) -1)
4088 if (debug_linux_nat)
4089 fprintf_unfiltered (gdb_stdlog,
4090 "KWC: wait %s received unknown.\n",
4091 target_pid_to_str (lp->ptid));
4092 /* The Linux kernel sometimes fails to kill a thread
4093 completely after PTRACE_KILL; that goes from the stop
4094 point in do_fork out to the one in
4095 get_signal_to_deliver and waits again. So kill it
4097 kill_callback (lp, NULL);
4100 while (pid == GET_LWP (lp->ptid));
/* Loop exits only once the LWP is fully reaped.  */
4102 gdb_assert (pid == -1 && errno == ECHILD);
/* Then drain events from a non-clone (regular) child.  */
4107 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4108 if (pid != (pid_t) -1)
4110 if (debug_linux_nat)
4111 fprintf_unfiltered (gdb_stdlog,
4112 "KWC: wait %s received unk.\n",
4113 target_pid_to_str (lp->ptid));
4114 /* See the call to kill_callback above. */
4115 kill_callback (lp, NULL);
4118 while (pid == GET_LWP (lp->ptid));
4120 gdb_assert (pid == -1 && errno == ECHILD);
/* Target "kill" method: kill every LWP of the current inferior (plus
   any not-yet-followed fork child), then mourn the inferior.  */
4125 linux_nat_kill (struct target_ops *ops)
4127 struct target_waitstatus last;
4131 /* If we're stopped while forking and we haven't followed yet,
4132 kill the other task. We need to do this first because the
4133 parent will be sleeping if this is a vfork. */
4135 get_last_target_status (&last_ptid, &last);
4137 if (last.kind == TARGET_WAITKIND_FORKED
4138 || last.kind == TARGET_WAITKIND_VFORKED)
4140 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4144 if (forks_exist_p ())
4145 linux_fork_killall ();
4148 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4150 /* Stop all threads before killing them, since ptrace requires
4151 that the thread is stopped to successfully PTRACE_KILL. */
4152 iterate_over_lwps (ptid, stop_callback, NULL);
4153 /* ... and wait until all of them have reported back that
4154 they're no longer running. */
4155 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4157 /* Kill all LWP's ... */
4158 iterate_over_lwps (ptid, kill_callback, NULL);
4160 /* ... and wait until we've flushed all events. */
4161 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4164 target_mourn_inferior ();
/* Target "mourn_inferior" method: drop our LWP bookkeeping for the
   exited process, then either defer to the low-level target or switch
   to a remaining fork.  */
4168 linux_nat_mourn_inferior (struct target_ops *ops)
4170 purge_lwp_list (ptid_get_pid (inferior_ptid));
4172 if (! forks_exist_p ())
4173 /* Normal case, no other forks available. */
4174 linux_ops->to_mourn_inferior (ops);
4176 /* Multi-fork case. The current inferior_ptid has exited, but
4177 there are other viable forks to debug. Delete the exiting
4178 one and context-switch to the first available. */
4179 linux_fork_mourn_inferior ();
4182 /* Convert a native/host siginfo object, into/from the siginfo in the
4183 layout of the inferiors' architecture. */
/* DIRECTION selects the conversion: the callers in linux_xfer_siginfo
   pass 0 after PTRACE_GETSIGINFO (native -> inferior layout) and 1
   before PTRACE_SETSIGINFO (inferior -> native).  */
4186 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4190 if (linux_nat_siginfo_fixup != NULL)
4191 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4193 /* If there was no callback, or the callback didn't do anything,
4194 then just do a straight memcpy. */
4198 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4200 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
/* Handle TARGET_OBJECT_SIGNAL_INFO transfers: read or write the
   inferior's pending siginfo via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO,
   converting between native and inferior layouts with siginfo_fixup.  */
4205 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4206 const char *annex, gdb_byte *readbuf,
4207 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4210 struct siginfo siginfo;
4211 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4213 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4214 gdb_assert (readbuf || writebuf);
/* Prefer the LWP id when inferior_ptid identifies an LWP.  */
4216 pid = GET_LWP (inferior_ptid);
4218 pid = GET_PID (inferior_ptid);
4220 if (offset > sizeof (siginfo))
4224 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4228 /* When GDB is built as a 64-bit application, ptrace writes into
4229 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4230 inferior with a 64-bit GDB should look the same as debugging it
4231 with a 32-bit GDB, we need to convert it. GDB core always sees
4232 the converted layout, so any read/write will have to be done
4234 siginfo_fixup (&siginfo, inf_siginfo, 0);
/* Clamp the transfer to the end of the siginfo object.  */
4236 if (offset + len > sizeof (siginfo))
4237 len = sizeof (siginfo) - offset;
4239 if (readbuf != NULL)
4240 memcpy (readbuf, inf_siginfo + offset, len);
4243 memcpy (inf_siginfo + offset, writebuf, len);
4245 /* Convert back to ptrace layout before flushing it out. */
4246 siginfo_fixup (&siginfo, inf_siginfo, 1);
4249 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
/* Target "xfer_partial" method: route siginfo requests to
   linux_xfer_siginfo, let memory requests with no live inferior fall
   to a lower stratum, and otherwise delegate to the low-level target
   with inferior_ptid temporarily narrowed to the plain PID.  */
4258 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4259 const char *annex, gdb_byte *readbuf,
4260 const gdb_byte *writebuf,
4261 ULONGEST offset, LONGEST len)
4263 struct cleanup *old_chain;
4266 if (object == TARGET_OBJECT_SIGNAL_INFO)
4267 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4270 /* The target is connected but no live inferior is selected. Pass
4271 this request down to a lower stratum (e.g., the executable
4273 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4276 old_chain = save_inferior_ptid ();
4278 if (is_lwp (inferior_ptid))
4279 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4281 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4284 do_cleanups (old_chain);
/* Report whether the LWP identified by PTID is still alive, by probing
   it with signal 0 (which delivers no actual signal).  */
4289 linux_thread_alive (ptid_t ptid)
4293 gdb_assert (is_lwp (ptid));
4295 /* Send signal 0 instead of anything ptrace, because ptracing a
4296 running thread errors out claiming that the thread doesn't
4298 err = kill_lwp (GET_LWP (ptid), 0);
4300 if (debug_linux_nat)
4301 fprintf_unfiltered (gdb_stdlog,
4302 "LLTA: KILL(SIG0) %s (%s)\n",
4303 target_pid_to_str (ptid),
4304 err ? safe_strerror (tmp_errno) : "OK");
/* Target "thread_alive" method: thin wrapper over linux_thread_alive.  */
4313 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4315 return linux_thread_alive (ptid);
/* Target "pid_to_str" method: print "LWP <n>" for real (multi-)threads,
   otherwise fall back to the normal pid representation.  Returns a
   pointer to a static buffer, overwritten on each call.  */
4319 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4321 static char buf[64];
4324 && (GET_PID (ptid) != GET_LWP (ptid)
4325 || num_lwps (GET_PID (ptid)) > 1))
4327 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4331 return normal_pid_to_str (ptid);
/* Target "thread_name" method: read the thread's name from
   /proc/PID/task/LWP/comm.  Presumably returns NULL when the comm
   file cannot be opened (RESULT starts out NULL) — the tail of the
   function is not visible here; TODO confirm.  */
4335 linux_nat_thread_name (struct thread_info *thr)
4337 int pid = ptid_get_pid (thr->ptid);
4338 long lwp = ptid_get_lwp (thr->ptid);
4339 #define FORMAT "/proc/%d/task/%ld/comm"
4340 char buf[sizeof (FORMAT) + 30];
4342 char *result = NULL;
4344 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4345 comm_file = fopen (buf, "r");
4348 /* Not exported by the kernel, so we define it here. */
4350 static char line[COMM_LEN + 1];
4352 if (fgets (line, sizeof (line), comm_file))
/* Strip the trailing newline fgets keeps.  */
4354 char *nl = strchr (line, '\n');
4371 /* Accepts an integer PID; Returns a string representing a file that
4372 can be opened to get the symbols for the child process. */
/* Resolves the /proc/PID/exe symlink; both scratch buffers are
   released via the cleanup chain.  */
4375 linux_child_pid_to_exec_file (int pid)
4377 char *name1, *name2;
4379 name1 = xmalloc (MAXPATHLEN);
4380 name2 = xmalloc (MAXPATHLEN);
4381 make_cleanup (xfree, name1);
4382 make_cleanup (xfree, name2);
/* readlink does not NUL-terminate its output, hence the pre-zeroing.  */
4383 memset (name2, 0, MAXPATHLEN);
4385 sprintf (name1, "/proc/%d/exe", pid);
4386 if (readlink (name1, name2, MAXPATHLEN) > 0)
4392 /* Service function for corefiles and info proc. */
/* Parse one line of a /proc/PID/maps file into its component fields.
   Returns nonzero while a mapping was successfully read.  */
4395 read_mapping (FILE *mapfile,
4400 char *device, long long *inode, char *filename)
4402 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4403 addr, endaddr, permissions, offset, device, inode);
4406 if (ret > 0 && ret != EOF)
4408 /* Eat everything up to EOL for the filename. This will prevent
4409 weird filenames (such as one with embedded whitespace) from
4410 confusing this code. It also makes this code more robust in
4411 respect to annotations the kernel may add after the filename.
4413 Note the filename is used for informational purposes
4415 ret += fscanf (mapfile, "%[^\n]\n", filename);
4418 return (ret != 0 && ret != EOF);
4421 /* Fills the "to_find_memory_regions" target vector. Lists the memory
4422 regions in the inferior for a corefile. */
/* FUNC is invoked once per mapping with its address range and rwx
   permissions; OBFD is passed through opaquely to FUNC.  */
4425 linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
4427 int pid = PIDGET (inferior_ptid);
4428 char mapsfilename[MAXPATHLEN];
4430 long long addr, endaddr, size, offset, inode;
4431 char permissions[8], device[8], filename[MAXPATHLEN];
4432 int read, write, exec;
4433 struct cleanup *cleanup;
4435 /* Compose the filename for the /proc memory map, and open it. */
4436 sprintf (mapsfilename, "/proc/%d/maps", pid);
4437 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
4438 error (_("Could not open %s."), mapsfilename);
4439 cleanup = make_cleanup_fclose (mapsfile);
4442 fprintf_filtered (gdb_stdout,
4443 "Reading memory regions from %s\n", mapsfilename);
4445 /* Now iterate until end-of-file. */
4446 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4447 &offset, &device[0], &inode, &filename[0]))
4449 size = endaddr - addr;
4451 /* Get the segment's permissions. */
4452 read = (strchr (permissions, 'r') != 0);
4453 write = (strchr (permissions, 'w') != 0);
4454 exec = (strchr (permissions, 'x') != 0);
4458 fprintf_filtered (gdb_stdout,
4459 "Save segment, %s bytes at %s (%c%c%c)",
4460 plongest (size), paddress (target_gdbarch, addr),
4462 write ? 'w' : ' ', exec ? 'x' : ' ');
4464 fprintf_filtered (gdb_stdout, " for %s", filename);
4465 fprintf_filtered (gdb_stdout, "\n");
4468 /* Invoke the callback function to create the corefile
4470 func (addr, size, read, write, exec, obfd);
4472 do_cleanups (cleanup);
/* Callback for iterate_over_threads: match a thread of the current
   process that stopped with a (non-zero) signal.  */
4477 find_signalled_thread (struct thread_info *info, void *data)
4479 if (info->suspend.stop_signal != TARGET_SIGNAL_0
4480 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
/* Return the stop signal of the first signalled thread of the current
   inferior, or TARGET_SIGNAL_0 when no thread stopped with a signal.
   Used when choosing the signal recorded in corefile notes.  */
4486 static enum target_signal
4487 find_stop_signal (void)
4489 struct thread_info *info =
4490 iterate_over_threads (find_signalled_thread, NULL);
4493 return info->suspend.stop_signal;
4495 return TARGET_SIGNAL_0;
4498 /* Records the thread's register state for the corefile note
/* Appends register notes for the thread PTID to NOTE_DATA and returns
   the (possibly reallocated) buffer; *NOTE_SIZE is updated by the
   elfcore_write_* helpers.  */
4502 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
4503 char *note_data, int *note_size,
4504 enum target_signal stop_signal)
4506 unsigned long lwp = ptid_get_lwp (ptid);
4507 struct gdbarch *gdbarch = target_gdbarch;
4508 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4509 const struct regset *regset;
4511 struct cleanup *old_chain;
4512 struct core_regset_section *sect_list;
/* Fetch the thread's registers with inferior_ptid temporarily
   switched to PTID.  */
4515 old_chain = save_inferior_ptid ();
4516 inferior_ptid = ptid;
4517 target_fetch_registers (regcache, -1);
4518 do_cleanups (old_chain);
4520 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4521 sect_list = gdbarch_core_regset_sections (gdbarch);
4523 /* The loop below uses the new struct core_regset_section, which stores
4524 the supported section names and sizes for the core file. Note that
4525 note PRSTATUS needs to be treated specially. But the other notes are
4526 structurally the same, so they can benefit from the new struct. */
4527 if (core_regset_p && sect_list != NULL)
4528 while (sect_list->sect_name != NULL)
4530 regset = gdbarch_regset_from_core_section (gdbarch,
4531 sect_list->sect_name,
4533 gdb_assert (regset && regset->collect_regset);
4534 gdb_regset = xmalloc (sect_list->size);
4535 regset->collect_regset (regset, regcache, -1,
4536 gdb_regset, sect_list->size);
/* ".reg" becomes the PRSTATUS note; everything else is emitted as a
   generic register note under its own section name.  */
4538 if (strcmp (sect_list->sect_name, ".reg") == 0)
4539 note_data = (char *) elfcore_write_prstatus
4540 (obfd, note_data, note_size,
4541 lwp, target_signal_to_host (stop_signal),
4544 note_data = (char *) elfcore_write_register_note
4545 (obfd, note_data, note_size,
4546 sect_list->sect_name, gdb_regset,
4552 /* For architectures that do not have the struct core_regset_section
4553 implemented, we use the old method. When all the architectures have
4554 the new support, the code below should be deleted. */
4557 gdb_gregset_t gregs;
4558 gdb_fpregset_t fpregs;
/* Collect general registers through the regset if the gdbarch provides
   one, else fall back to fill_gregset.  */
4561 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4563 != NULL && regset->collect_regset != NULL)
4564 regset->collect_regset (regset, regcache, -1,
4565 &gregs, sizeof (gregs));
4567 fill_gregset (regcache, &gregs, -1);
4569 note_data = (char *) elfcore_write_prstatus
4570 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
/* Likewise for the floating-point registers (".reg2").  */
4574 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4576 != NULL && regset->collect_regset != NULL)
4577 regset->collect_regset (regset, regcache, -1,
4578 &fpregs, sizeof (fpregs));
4580 fill_fpregset (regcache, &fpregs, -1);
4582 note_data = (char *) elfcore_write_prfpreg (obfd,
4585 &fpregs, sizeof (fpregs));
/* Accumulator threaded through linux_nat_corefile_thread_callback
   while building per-thread corefile notes.  (The remaining members
   are not visible in this view.)  */
4591 struct linux_nat_corefile_thread_data
/* Signal to record in each thread's PRSTATUS note.  */
4597 enum target_signal stop_signal;
4600 /* Called by gdbthread.c once per thread. Records the thread's
4601 register state for the corefile note section. */
/* DATA is a struct linux_nat_corefile_thread_data; the growing note
   buffer is carried through its note_data member.  */
4604 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4606 struct linux_nat_corefile_thread_data *args = data;
4608 args->note_data = linux_nat_do_thread_registers (args->obfd,
4618 /* Enumerate spufs IDs for process PID. */
/* Scans /proc/PID/fd and calls CALLBACK (DATA, fd) for every open fd
   that is a directory on an spufs mount.  */
4621 iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4625 struct dirent *entry;
4627 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4628 dir = opendir (path);
4633 while ((entry = readdir (dir)) != NULL)
4639 fd = atoi (entry->d_name);
4643 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4644 if (stat (path, &st) != 0)
4646 if (!S_ISDIR (st.st_mode))
/* An SPU context appears as a directory on a spufs filesystem
   (f_type == SPUFS_MAGIC).  */
4649 if (statfs (path, &stfs) != 0)
4651 if (stfs.f_type != SPUFS_MAGIC)
4654 callback (data, fd);
4660 /* Generate corefile notes for SPU contexts. */
/* Accumulator threaded through linux_spu_corefile_callback.  (Member
   declarations besides the tag are not visible in this view.)  */
4662 struct linux_spu_corefile_data
/* Called once per SPU context fd by iterate_over_spus: reads each
   spufs register/state file through the TARGET_OBJECT_SPU interface
   and appends it to the note buffer as an "SPU/<fd>/<file>" note.  */
4670 linux_spu_corefile_callback (void *data, int fd)
4672 struct linux_spu_corefile_data *args = data;
4675 static const char *spu_files[] =
4697 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4699 char annex[32], note_name[32];
4703 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4704 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4708 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4709 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4710 args->note_size, note_name,
4711 NT_SPU, spu_data, spu_len);
/* Build corefile notes for all SPU contexts of the inferior by
   iterating over its spufs fds.  Returns the (possibly reallocated)
   note buffer.  */
4718 linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4720 struct linux_spu_corefile_data args;
4723 args.note_data = note_data;
4724 args.note_size = note_size;
4726 iterate_over_spus (PIDGET (inferior_ptid),
4727 linux_spu_corefile_callback, &args);
4729 return args.note_data;
4732 /* Fills the "to_make_corefile_note" target vector. Builds the note
4733 section for a corefile, and returns it in a malloc buffer. */
/* Emits, in order: a PRPSINFO note (program name and arguments),
   per-thread register notes, the auxiliary vector (NT_AUXV), and any
   SPU context notes.  The returned buffer is registered with the
   cleanup chain.  */
4736 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4738 struct linux_nat_corefile_thread_data thread_args;
4739 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
4740 char fname[16] = { '\0' };
4741 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
4742 char psargs[80] = { '\0' };
4743 char *note_data = NULL;
4744 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
4748 if (get_exec_file (0))
4750 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
4751 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4752 if (get_inferior_args ())
4755 char *psargs_end = psargs + sizeof (psargs);
4757 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4759 string_end = memchr (psargs, 0, sizeof (psargs));
4760 if (string_end != NULL)
/* Append the inferior's arguments after the executable path,
   separated by a space, truncated to what fits in psargs.  */
4762 *string_end++ = ' ';
4763 strncpy (string_end, get_inferior_args (),
4764 psargs_end - string_end);
4767 note_data = (char *) elfcore_write_prpsinfo (obfd,
4769 note_size, fname, psargs);
4772 /* Dump information for threads. */
4773 thread_args.obfd = obfd;
4774 thread_args.note_data = note_data;
4775 thread_args.note_size = note_size;
4776 thread_args.num_notes = 0;
4777 thread_args.stop_signal = find_stop_signal ();
4778 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
4779 gdb_assert (thread_args.num_notes != 0);
4780 note_data = thread_args.note_data;
4782 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4786 note_data = elfcore_write_note (obfd, note_data, note_size,
4787 "CORE", NT_AUXV, auxv, auxv_len);
4791 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4793 make_cleanup (xfree, note_data);
4797 /* Implement the "info proc" command. */
4801 /* Display the default cmdline, cwd and exe outputs. */
4804 /* Display `info proc mappings'. */
4807 /* Display `info proc status'. */
4810 /* Display `info proc stat'. */
4813 /* Display `info proc cmdline'. */
4816 /* Display `info proc exe'. */
4819 /* Display `info proc cwd'. */
4822 /* Display all of the above. */
4827 linux_nat_info_proc_cmd_1 (char *args, enum info_proc_what what, int from_tty)
4829 /* A long is used for pid instead of an int to avoid a loss of precision
4830 compiler warning from the output of strtoul. */
4831 long pid = PIDGET (inferior_ptid);
4833 char buffer[MAXPATHLEN];
4834 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4835 int cmdline_f = (what == IP_MINIMAL || what == IP_CMDLINE || what == IP_ALL);
4836 int cwd_f = (what == IP_MINIMAL || what == IP_CWD || what == IP_ALL);
4837 int exe_f = (what == IP_MINIMAL || what == IP_EXE || what == IP_ALL);
4838 int mappings_f = (what == IP_MAPPINGS || what == IP_ALL);
4839 int status_f = (what == IP_STATUS || what == IP_ALL);
4840 int stat_f = (what == IP_STAT || what == IP_ALL);
4843 if (args && isdigit (args[0]))
4844 pid = strtoul (args, &args, 10);
4846 args = skip_spaces (args);
4847 if (args && args[0])
4848 error (_("Too many parameters: %s"), args);
4851 error (_("No current process: you must name one."));
4853 sprintf (fname1, "/proc/%ld", pid);
4854 if (stat (fname1, &dummy) != 0)
4855 error (_("No /proc directory: '%s'"), fname1);
4857 printf_filtered (_("process %ld\n"), pid);
4860 sprintf (fname1, "/proc/%ld/cmdline", pid);
4861 if ((procfile = fopen (fname1, "r")) != NULL)
4863 struct cleanup *cleanup = make_cleanup_fclose (procfile);
4865 if (fgets (buffer, sizeof (buffer), procfile))
4866 printf_filtered ("cmdline = '%s'\n", buffer);
4868 warning (_("unable to read '%s'"), fname1);
4869 do_cleanups (cleanup);
4872 warning (_("unable to open /proc file '%s'"), fname1);
4876 sprintf (fname1, "/proc/%ld/cwd", pid);
4877 memset (fname2, 0, sizeof (fname2));
4878 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4879 printf_filtered ("cwd = '%s'\n", fname2);
4881 warning (_("unable to read link '%s'"), fname1);
4885 sprintf (fname1, "/proc/%ld/exe", pid);
4886 memset (fname2, 0, sizeof (fname2));
4887 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4888 printf_filtered ("exe = '%s'\n", fname2);
4890 warning (_("unable to read link '%s'"), fname1);
4894 sprintf (fname1, "/proc/%ld/maps", pid);
4895 if ((procfile = fopen (fname1, "r")) != NULL)
4897 long long addr, endaddr, size, offset, inode;
4898 char permissions[8], device[8], filename[MAXPATHLEN];
4899 struct cleanup *cleanup;
4901 cleanup = make_cleanup_fclose (procfile);
4902 printf_filtered (_("Mapped address spaces:\n\n"));
4903 if (gdbarch_addr_bit (target_gdbarch) == 32)
4905 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4908 " Size", " Offset", "objfile");
4912 printf_filtered (" %18s %18s %10s %10s %7s\n",
4915 " Size", " Offset", "objfile");
4918 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4919 &offset, &device[0], &inode, &filename[0]))
4921 size = endaddr - addr;
4923 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4924 calls here (and possibly above) should be abstracted
4925 out into their own functions? Andrew suggests using
4926 a generic local_address_string instead to print out
4927 the addresses; that makes sense to me, too. */
4929 if (gdbarch_addr_bit (target_gdbarch) == 32)
4931 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4932 (unsigned long) addr, /* FIXME: pr_addr */
4933 (unsigned long) endaddr,
4935 (unsigned int) offset,
4936 filename[0] ? filename : "");
4940 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4941 (unsigned long) addr, /* FIXME: pr_addr */
4942 (unsigned long) endaddr,
4944 (unsigned int) offset,
4945 filename[0] ? filename : "");
4949 do_cleanups (cleanup);
4952 warning (_("unable to open /proc file '%s'"), fname1);
4956 sprintf (fname1, "/proc/%ld/status", pid);
4957 if ((procfile = fopen (fname1, "r")) != NULL)
4959 struct cleanup *cleanup = make_cleanup_fclose (procfile);
4961 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4962 puts_filtered (buffer);
4963 do_cleanups (cleanup);
4966 warning (_("unable to open /proc file '%s'"), fname1);
4970 sprintf (fname1, "/proc/%ld/stat", pid);
4971 if ((procfile = fopen (fname1, "r")) != NULL)
4976 struct cleanup *cleanup = make_cleanup_fclose (procfile);
4978 if (fscanf (procfile, "%d ", &itmp) > 0)
4979 printf_filtered (_("Process: %d\n"), itmp);
4980 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
4981 printf_filtered (_("Exec file: %s\n"), buffer);
4982 if (fscanf (procfile, "%c ", &ctmp) > 0)
4983 printf_filtered (_("State: %c\n"), ctmp);
4984 if (fscanf (procfile, "%d ", &itmp) > 0)
4985 printf_filtered (_("Parent process: %d\n"), itmp);
4986 if (fscanf (procfile, "%d ", &itmp) > 0)
4987 printf_filtered (_("Process group: %d\n"), itmp);
4988 if (fscanf (procfile, "%d ", &itmp) > 0)
4989 printf_filtered (_("Session id: %d\n"), itmp);
4990 if (fscanf (procfile, "%d ", &itmp) > 0)
4991 printf_filtered (_("TTY: %d\n"), itmp);
4992 if (fscanf (procfile, "%d ", &itmp) > 0)
4993 printf_filtered (_("TTY owner process group: %d\n"), itmp);
4994 if (fscanf (procfile, "%lu ", <mp) > 0)
4995 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4996 if (fscanf (procfile, "%lu ", <mp) > 0)
4997 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4998 (unsigned long) ltmp);
4999 if (fscanf (procfile, "%lu ", <mp) > 0)
5000 printf_filtered (_("Minor faults, children: %lu\n"),
5001 (unsigned long) ltmp);
5002 if (fscanf (procfile, "%lu ", <mp) > 0)
5003 printf_filtered (_("Major faults (memory page faults): %lu\n"),
5004 (unsigned long) ltmp);
5005 if (fscanf (procfile, "%lu ", <mp) > 0)
5006 printf_filtered (_("Major faults, children: %lu\n"),
5007 (unsigned long) ltmp);
5008 if (fscanf (procfile, "%ld ", <mp) > 0)
5009 printf_filtered (_("utime: %ld\n"), ltmp);
5010 if (fscanf (procfile, "%ld ", <mp) > 0)
5011 printf_filtered (_("stime: %ld\n"), ltmp);
5012 if (fscanf (procfile, "%ld ", <mp) > 0)
5013 printf_filtered (_("utime, children: %ld\n"), ltmp);
5014 if (fscanf (procfile, "%ld ", <mp) > 0)
5015 printf_filtered (_("stime, children: %ld\n"), ltmp);
5016 if (fscanf (procfile, "%ld ", <mp) > 0)
5017 printf_filtered (_("jiffies remaining in current "
5018 "time slice: %ld\n"), ltmp);
5019 if (fscanf (procfile, "%ld ", <mp) > 0)
5020 printf_filtered (_("'nice' value: %ld\n"), ltmp);
5021 if (fscanf (procfile, "%lu ", <mp) > 0)
5022 printf_filtered (_("jiffies until next timeout: %lu\n"),
5023 (unsigned long) ltmp);
5024 if (fscanf (procfile, "%lu ", <mp) > 0)
5025 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
5026 (unsigned long) ltmp);
5027 if (fscanf (procfile, "%ld ", <mp) > 0)
5028 printf_filtered (_("start time (jiffies since "
5029 "system boot): %ld\n"), ltmp);
5030 if (fscanf (procfile, "%lu ", <mp) > 0)
5031 printf_filtered (_("Virtual memory size: %lu\n"),
5032 (unsigned long) ltmp);
5033 if (fscanf (procfile, "%lu ", <mp) > 0)
5034 printf_filtered (_("Resident set size: %lu\n"),
5035 (unsigned long) ltmp);
5036 if (fscanf (procfile, "%lu ", <mp) > 0)
5037 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
5038 if (fscanf (procfile, "%lu ", <mp) > 0)
5039 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
5040 if (fscanf (procfile, "%lu ", <mp) > 0)
5041 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
5042 if (fscanf (procfile, "%lu ", <mp) > 0)
5043 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
5044 #if 0 /* Don't know how architecture-dependent the rest is...
5045 Anyway the signal bitmap info is available from "status". */
5046 if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
5047 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
5048 if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
5049 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
5050 if (fscanf (procfile, "%ld ", <mp) > 0)
5051 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
5052 if (fscanf (procfile, "%ld ", <mp) > 0)
5053 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
5054 if (fscanf (procfile, "%ld ", <mp) > 0)
5055 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
5056 if (fscanf (procfile, "%ld ", <mp) > 0)
5057 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
5058 if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
5059 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
5061 do_cleanups (cleanup);
5064 warning (_("unable to open /proc file '%s'"), fname1);
5068 /* Implement `info proc' when given without any futher parameters. */
5071 linux_nat_info_proc_cmd (char *args, int from_tty)
5073 linux_nat_info_proc_cmd_1 (args, IP_MINIMAL, from_tty);
5076 /* Implement `info proc mappings'. */
5079 linux_nat_info_proc_cmd_mappings (char *args, int from_tty)
5081 linux_nat_info_proc_cmd_1 (args, IP_MAPPINGS, from_tty);
5084 /* Implement `info proc stat'. */
5087 linux_nat_info_proc_cmd_stat (char *args, int from_tty)
5089 linux_nat_info_proc_cmd_1 (args, IP_STAT, from_tty);
5092 /* Implement `info proc status'. */
5095 linux_nat_info_proc_cmd_status (char *args, int from_tty)
5097 linux_nat_info_proc_cmd_1 (args, IP_STATUS, from_tty);
5100 /* Implement `info proc cwd'. */
5103 linux_nat_info_proc_cmd_cwd (char *args, int from_tty)
5105 linux_nat_info_proc_cmd_1 (args, IP_CWD, from_tty);
5108 /* Implement `info proc cmdline'. */
5111 linux_nat_info_proc_cmd_cmdline (char *args, int from_tty)
5113 linux_nat_info_proc_cmd_1 (args, IP_CMDLINE, from_tty);
5116 /* Implement `info proc exe'. */
5119 linux_nat_info_proc_cmd_exe (char *args, int from_tty)
5121 linux_nat_info_proc_cmd_1 (args, IP_EXE, from_tty);
5124 /* Implement `info proc all'. */
5127 linux_nat_info_proc_cmd_all (char *args, int from_tty)
5129 linux_nat_info_proc_cmd_1 (args, IP_ALL, from_tty);
5132 /* Implement the to_xfer_partial interface for memory reads using the /proc
5133 filesystem. Because we can use a single read() call for /proc, this
5134 can be much more efficient than banging away at PTRACE_PEEKTEXT,
5135 but it doesn't support writes. */
5138 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
5139 const char *annex, gdb_byte *readbuf,
5140 const gdb_byte *writebuf,
5141 ULONGEST offset, LONGEST len)
5147 if (object != TARGET_OBJECT_MEMORY || !readbuf)
5150 /* Don't bother for one word. */
5151 if (len < 3 * sizeof (long))
5154 /* We could keep this file open and cache it - possibly one per
5155 thread. That requires some juggling, but is even faster. */
5156 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
5157 fd = open (filename, O_RDONLY | O_LARGEFILE);
5161 /* If pread64 is available, use it. It's faster if the kernel
5162 supports it (only one syscall), and it's 64-bit safe even on
5163 32-bit platforms (for instance, SPARC debugging a SPARC64
5166 if (pread64 (fd, readbuf, len, offset) != len)
5168 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
5179 /* Enumerate spufs IDs for process PID. */
5181 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
5183 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
5185 LONGEST written = 0;
5188 struct dirent *entry;
5190 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
5191 dir = opendir (path);
5196 while ((entry = readdir (dir)) != NULL)
5202 fd = atoi (entry->d_name);
5206 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
5207 if (stat (path, &st) != 0)
5209 if (!S_ISDIR (st.st_mode))
5212 if (statfs (path, &stfs) != 0)
5214 if (stfs.f_type != SPUFS_MAGIC)
5217 if (pos >= offset && pos + 4 <= offset + len)
5219 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
5229 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
5230 object type, using the /proc file system. */
5232 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
5233 const char *annex, gdb_byte *readbuf,
5234 const gdb_byte *writebuf,
5235 ULONGEST offset, LONGEST len)
5240 int pid = PIDGET (inferior_ptid);
5247 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5250 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
5251 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5256 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5263 ret = write (fd, writebuf, (size_t) len);
5265 ret = read (fd, readbuf, (size_t) len);
5272 /* Parse LINE as a signal set and add its set bits to SIGS. */
5275 add_line_to_sigset (const char *line, sigset_t *sigs)
5277 int len = strlen (line) - 1;
5281 if (line[len] != '\n')
5282 error (_("Could not parse signal set: %s"), line);
5290 if (*p >= '0' && *p <= '9')
5292 else if (*p >= 'a' && *p <= 'f')
5293 digit = *p - 'a' + 10;
5295 error (_("Could not parse signal set: %s"), line);
5300 sigaddset (sigs, signum + 1);
5302 sigaddset (sigs, signum + 2);
5304 sigaddset (sigs, signum + 3);
5306 sigaddset (sigs, signum + 4);
5312 /* Find process PID's pending signals from /proc/pid/status and set
5316 linux_proc_pending_signals (int pid, sigset_t *pending,
5317 sigset_t *blocked, sigset_t *ignored)
5320 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
5321 struct cleanup *cleanup;
5323 sigemptyset (pending);
5324 sigemptyset (blocked);
5325 sigemptyset (ignored);
5326 sprintf (fname, "/proc/%d/status", pid);
5327 procfile = fopen (fname, "r");
5328 if (procfile == NULL)
5329 error (_("Could not open %s"), fname);
5330 cleanup = make_cleanup_fclose (procfile);
5332 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5334 /* Normal queued signals are on the SigPnd line in the status
5335 file. However, 2.6 kernels also have a "shared" pending
5336 queue for delivering signals to a thread group, so check for
5339 Unfortunately some Red Hat kernels include the shared pending
5340 queue but not the ShdPnd status field. */
5342 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5343 add_line_to_sigset (buffer + 8, pending);
5344 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5345 add_line_to_sigset (buffer + 8, pending);
5346 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5347 add_line_to_sigset (buffer + 8, blocked);
5348 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5349 add_line_to_sigset (buffer + 8, ignored);
5352 do_cleanups (cleanup);
5356 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
5357 const char *annex, gdb_byte *readbuf,
5358 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5360 gdb_assert (object == TARGET_OBJECT_OSDATA);
5362 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5366 linux_xfer_partial (struct target_ops *ops, enum target_object object,
5367 const char *annex, gdb_byte *readbuf,
5368 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5372 if (object == TARGET_OBJECT_AUXV)
5373 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
5376 if (object == TARGET_OBJECT_OSDATA)
5377 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5380 if (object == TARGET_OBJECT_SPU)
5381 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5384 /* GDB calculates all the addresses in possibly larget width of the address.
5385 Address width needs to be masked before its final use - either by
5386 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5388 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5390 if (object == TARGET_OBJECT_MEMORY)
5392 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5394 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5395 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5398 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5403 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5407 /* Create a prototype generic GNU/Linux target. The client can override
5408 it with local methods. */
5411 linux_target_install_ops (struct target_ops *t)
5413 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
5414 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
5415 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
5416 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
5417 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
5418 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
5419 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
5420 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
5421 t->to_post_startup_inferior = linux_child_post_startup_inferior;
5422 t->to_post_attach = linux_child_post_attach;
5423 t->to_follow_fork = linux_child_follow_fork;
5424 t->to_find_memory_regions = linux_nat_find_memory_regions;
5425 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5427 super_xfer_partial = t->to_xfer_partial;
5428 t->to_xfer_partial = linux_xfer_partial;
5434 struct target_ops *t;
5436 t = inf_ptrace_target ();
5437 linux_target_install_ops (t);
5443 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
5445 struct target_ops *t;
5447 t = inf_ptrace_trad_target (register_u_offset);
5448 linux_target_install_ops (t);
5453 /* target_is_async_p implementation. */
5456 linux_nat_is_async_p (void)
5458 /* NOTE: palves 2008-03-21: We're only async when the user requests
5459 it explicitly with the "set target-async" command.
5460 Someday, linux will always be async. */
5461 return target_async_permitted;
5464 /* target_can_async_p implementation. */
5467 linux_nat_can_async_p (void)
5469 /* NOTE: palves 2008-03-21: We're only async when the user requests
5470 it explicitly with the "set target-async" command.
5471 Someday, linux will always be async. */
5472 return target_async_permitted;
5476 linux_nat_supports_non_stop (void)
5481 /* True if we want to support multi-process. To be removed when GDB
5482 supports multi-exec. */
5484 int linux_multi_process = 1;
5487 linux_nat_supports_multi_process (void)
5489 return linux_multi_process;
5493 linux_nat_supports_disable_randomization (void)
5495 #ifdef HAVE_PERSONALITY
5502 static int async_terminal_is_ours = 1;
5504 /* target_terminal_inferior implementation. */
5507 linux_nat_terminal_inferior (void)
5509 if (!target_is_async_p ())
5511 /* Async mode is disabled. */
5512 terminal_inferior ();
5516 terminal_inferior ();
5518 /* Calls to target_terminal_*() are meant to be idempotent. */
5519 if (!async_terminal_is_ours)
5522 delete_file_handler (input_fd);
5523 async_terminal_is_ours = 0;
5527 /* target_terminal_ours implementation. */
5530 linux_nat_terminal_ours (void)
5532 if (!target_is_async_p ())
5534 /* Async mode is disabled. */
5539 /* GDB should never give the terminal to the inferior if the
5540 inferior is running in the background (run&, continue&, etc.),
5541 but claiming it sure should. */
5544 if (async_terminal_is_ours)
5547 clear_sigint_trap ();
5548 add_file_handler (input_fd, stdin_event_handler, 0);
5549 async_terminal_is_ours = 1;
5552 static void (*async_client_callback) (enum inferior_event_type event_type,
5554 static void *async_client_context;
5556 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5557 so we notice when any child changes state, and notify the
5558 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
5559 above to wait for the arrival of a SIGCHLD. */
5562 sigchld_handler (int signo)
5564 int old_errno = errno;
5566 if (debug_linux_nat)
5567 ui_file_write_async_safe (gdb_stdlog,
5568 "sigchld\n", sizeof ("sigchld\n") - 1);
5570 if (signo == SIGCHLD
5571 && linux_nat_event_pipe[0] != -1)
5572 async_file_mark (); /* Let the event loop know that there are
5573 events to handle. */
5578 /* Callback registered with the target events file descriptor. */
5581 handle_target_event (int error, gdb_client_data client_data)
5583 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5586 /* Create/destroy the target events pipe. Returns previous state. */
5589 linux_async_pipe (int enable)
5591 int previous = (linux_nat_event_pipe[0] != -1);
5593 if (previous != enable)
5597 block_child_signals (&prev_mask);
5601 if (pipe (linux_nat_event_pipe) == -1)
5602 internal_error (__FILE__, __LINE__,
5603 "creating event pipe failed.");
5605 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5606 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5610 close (linux_nat_event_pipe[0]);
5611 close (linux_nat_event_pipe[1]);
5612 linux_nat_event_pipe[0] = -1;
5613 linux_nat_event_pipe[1] = -1;
5616 restore_child_signals_mask (&prev_mask);
5622 /* target_async implementation. */
5625 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5626 void *context), void *context)
5628 if (callback != NULL)
5630 async_client_callback = callback;
5631 async_client_context = context;
5632 if (!linux_async_pipe (1))
5634 add_file_handler (linux_nat_event_pipe[0],
5635 handle_target_event, NULL);
5636 /* There may be pending events to handle. Tell the event loop
5643 async_client_callback = callback;
5644 async_client_context = context;
5645 delete_file_handler (linux_nat_event_pipe[0]);
5646 linux_async_pipe (0);
5651 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5655 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
5659 ptid_t ptid = lwp->ptid;
5661 if (debug_linux_nat)
5662 fprintf_unfiltered (gdb_stdlog,
5663 "LNSL: running -> suspending %s\n",
5664 target_pid_to_str (lwp->ptid));
5667 if (lwp->last_resume_kind == resume_stop)
5669 if (debug_linux_nat)
5670 fprintf_unfiltered (gdb_stdlog,
5671 "linux-nat: already stopping LWP %ld at "
5673 ptid_get_lwp (lwp->ptid));
5677 stop_callback (lwp, NULL);
5678 lwp->last_resume_kind = resume_stop;
5682 /* Already known to be stopped; do nothing. */
5684 if (debug_linux_nat)
5686 if (find_thread_ptid (lwp->ptid)->stop_requested)
5687 fprintf_unfiltered (gdb_stdlog,
5688 "LNSL: already stopped/stop_requested %s\n",
5689 target_pid_to_str (lwp->ptid));
5691 fprintf_unfiltered (gdb_stdlog,
5692 "LNSL: already stopped/no "
5693 "stop_requested yet %s\n",
5694 target_pid_to_str (lwp->ptid));
5701 linux_nat_stop (ptid_t ptid)
5704 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5706 linux_ops->to_stop (ptid);
5710 linux_nat_close (int quitting)
5712 /* Unregister from the event loop. */
5713 if (target_is_async_p ())
5714 target_async (NULL, 0);
5716 if (linux_ops->to_close)
5717 linux_ops->to_close (quitting);
5720 /* When requests are passed down from the linux-nat layer to the
5721 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5722 used. The address space pointer is stored in the inferior object,
5723 but the common code that is passed such ptid can't tell whether
5724 lwpid is a "main" process id or not (it assumes so). We reverse
5725 look up the "main" process id from the lwp here. */
5727 struct address_space *
5728 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5730 struct lwp_info *lwp;
5731 struct inferior *inf;
5734 pid = GET_LWP (ptid);
5735 if (GET_LWP (ptid) == 0)
5737 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5739 lwp = find_lwp_pid (ptid);
5740 pid = GET_PID (lwp->ptid);
5744 /* A (pid,lwpid,0) ptid. */
5745 pid = GET_PID (ptid);
5748 inf = find_inferior_pid (pid);
5749 gdb_assert (inf != NULL);
5754 linux_nat_core_of_thread_1 (ptid_t ptid)
5756 struct cleanup *back_to;
5759 char *content = NULL;
5762 int content_read = 0;
5766 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5767 GET_PID (ptid), GET_LWP (ptid));
5768 back_to = make_cleanup (xfree, filename);
5770 f = fopen (filename, "r");
5773 do_cleanups (back_to);
5777 make_cleanup_fclose (f);
5783 content = xrealloc (content, content_read + 1024);
5784 n = fread (content + content_read, 1, 1024, f);
5788 content[content_read] = '\0';
5793 make_cleanup (xfree, content);
5795 p = strchr (content, '(');
5799 p = strchr (p, ')');
5803 /* If the first field after program name has index 0, then core number is
5804 the field with index 36. There's no constant for that anywhere. */
5806 p = strtok_r (p, " ", &ts);
5807 for (i = 0; p != NULL && i != 36; ++i)
5808 p = strtok_r (NULL, " ", &ts);
5810 if (p == NULL || sscanf (p, "%d", &core) == 0)
5813 do_cleanups (back_to);
5818 /* Return the cached value of the processor core for thread PTID. */
5821 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5823 struct lwp_info *info = find_lwp_pid (ptid);
5831 linux_nat_add_target (struct target_ops *t)
5833 /* Save the provided single-threaded target. We save this in a separate
5834 variable because another target we've inherited from (e.g. inf-ptrace)
5835 may have saved a pointer to T; we want to use it for the final
5836 process stratum target. */
5837 linux_ops_saved = *t;
5838 linux_ops = &linux_ops_saved;
5840 /* Override some methods for multithreading. */
5841 t->to_create_inferior = linux_nat_create_inferior;
5842 t->to_attach = linux_nat_attach;
5843 t->to_detach = linux_nat_detach;
5844 t->to_resume = linux_nat_resume;
5845 t->to_wait = linux_nat_wait;
5846 t->to_pass_signals = linux_nat_pass_signals;
5847 t->to_xfer_partial = linux_nat_xfer_partial;
5848 t->to_kill = linux_nat_kill;
5849 t->to_mourn_inferior = linux_nat_mourn_inferior;
5850 t->to_thread_alive = linux_nat_thread_alive;
5851 t->to_pid_to_str = linux_nat_pid_to_str;
5852 t->to_thread_name = linux_nat_thread_name;
5853 t->to_has_thread_control = tc_schedlock;
5854 t->to_thread_address_space = linux_nat_thread_address_space;
5855 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5856 t->to_stopped_data_address = linux_nat_stopped_data_address;
5858 t->to_can_async_p = linux_nat_can_async_p;
5859 t->to_is_async_p = linux_nat_is_async_p;
5860 t->to_supports_non_stop = linux_nat_supports_non_stop;
5861 t->to_async = linux_nat_async;
5862 t->to_terminal_inferior = linux_nat_terminal_inferior;
5863 t->to_terminal_ours = linux_nat_terminal_ours;
5864 t->to_close = linux_nat_close;
5866 /* Methods for non-stop support. */
5867 t->to_stop = linux_nat_stop;
5869 t->to_supports_multi_process = linux_nat_supports_multi_process;
5871 t->to_supports_disable_randomization
5872 = linux_nat_supports_disable_randomization;
5874 t->to_core_of_thread = linux_nat_core_of_thread;
5876 /* We don't change the stratum; this target will sit at
5877 process_stratum and thread_db will set at thread_stratum. This
5878 is a little strange, since this is a multi-threaded-capable
5879 target, but we want to be on the stack below thread_db, and we
5880 also want to be used for single-threaded processes. */
5885 /* Register a method to call whenever a new thread is attached. */
5887 linux_nat_set_new_thread (struct target_ops *t,
5888 void (*new_thread) (struct lwp_info *))
5890 /* Save the pointer. We only support a single registered instance
5891 of the GNU/Linux native target, so we do not need to map this to
5893 linux_nat_new_thread = new_thread;
5896 /* Register a method that converts a siginfo object between the layout
5897 that ptrace returns, and the layout in the architecture of the
5900 linux_nat_set_siginfo_fixup (struct target_ops *t,
5901 int (*siginfo_fixup) (struct siginfo *,
5905 /* Save the pointer. */
5906 linux_nat_siginfo_fixup = siginfo_fixup;
5909 /* Register a method to call prior to resuming a thread. */
5912 linux_nat_set_prepare_to_resume (struct target_ops *t,
5913 void (*prepare_to_resume) (struct lwp_info *))
5915 /* Save the pointer. */
5916 linux_nat_prepare_to_resume = prepare_to_resume;
5919 /* Return the saved siginfo associated with PTID. */
5921 linux_nat_get_siginfo (ptid_t ptid)
5923 struct lwp_info *lp = find_lwp_pid (ptid);
5925 gdb_assert (lp != NULL);
5927 return &lp->siginfo;
5930 /* Provide a prototype to silence -Wmissing-prototypes. */
5931 extern initialize_file_ftype _initialize_linux_nat;
5934 _initialize_linux_nat (void)
5936 static struct cmd_list_element *info_proc_cmdlist;
5938 add_prefix_cmd ("proc", class_info, linux_nat_info_proc_cmd,
5940 Show /proc process information about any running process.\n\
5941 Specify any process id, or use the program being debugged by default."),
5942 &info_proc_cmdlist, "info proc ",
5943 1/*allow-unknown*/, &infolist);
5945 add_cmd ("mappings", class_info, linux_nat_info_proc_cmd_mappings, _("\
5946 List of mapped memory regions."),
5947 &info_proc_cmdlist);
5949 add_cmd ("stat", class_info, linux_nat_info_proc_cmd_stat, _("\
5950 List process info from /proc/PID/stat."),
5951 &info_proc_cmdlist);
5953 add_cmd ("status", class_info, linux_nat_info_proc_cmd_status, _("\
5954 List process info from /proc/PID/status."),
5955 &info_proc_cmdlist);
5957 add_cmd ("cwd", class_info, linux_nat_info_proc_cmd_cwd, _("\
5958 List current working directory of the process."),
5959 &info_proc_cmdlist);
5961 add_cmd ("cmdline", class_info, linux_nat_info_proc_cmd_cmdline, _("\
5962 List command line arguments of the process."),
5963 &info_proc_cmdlist);
5965 add_cmd ("exe", class_info, linux_nat_info_proc_cmd_exe, _("\
5966 List absolute filename for executable of the process."),
5967 &info_proc_cmdlist);
5969 add_cmd ("all", class_info, linux_nat_info_proc_cmd_all, _("\
5970 List all available /proc info."),
5971 &info_proc_cmdlist);
5973 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5974 &debug_linux_nat, _("\
5975 Set debugging of GNU/Linux lwp module."), _("\
5976 Show debugging of GNU/Linux lwp module."), _("\
5977 Enables printf debugging output."),
5979 show_debug_linux_nat,
5980 &setdebuglist, &showdebuglist);
5982 /* Save this mask as the default. */
5983 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5985 /* Install a SIGCHLD handler. */
5986 sigchld_action.sa_handler = sigchld_handler;
5987 sigemptyset (&sigchld_action.sa_mask);
5988 sigchld_action.sa_flags = SA_RESTART;
5990 /* Make it the default. */
5991 sigaction (SIGCHLD, &sigchld_action, NULL);
5993 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5994 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5995 sigdelset (&suspend_mask, SIGCHLD);
5997 sigemptyset (&blocked_mask);
6001 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
6002 the GNU/Linux Threads library and therefore doesn't really belong
6005 /* Read variable NAME in the target and return its value if found.
6006 Otherwise return zero. It is assumed that the type of the variable
6010 get_signo (const char *name)
6012 struct minimal_symbol *ms;
6015 ms = lookup_minimal_symbol (name, NULL, NULL);
6019 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
6020 sizeof (signo)) != 0)
6026 /* Return the set of signals used by the threads library in *SET. */
6029 lin_thread_get_thread_signals (sigset_t *set)
6031 struct sigaction action;
6032 int restart, cancel;
6034 sigemptyset (&blocked_mask);
6037 restart = get_signo ("__pthread_sig_restart");
6038 cancel = get_signo ("__pthread_sig_cancel");
6040 /* LinuxThreads normally uses the first two RT signals, but in some legacy
6041 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
6042 not provide any way for the debugger to query the signal numbers -
6043 fortunately they don't change! */
6046 restart = __SIGRTMIN;
6049 cancel = __SIGRTMIN + 1;
6051 sigaddset (set, restart);
6052 sigaddset (set, cancel);
6054 /* The GNU/Linux Threads library makes terminating threads send a
6055 special "cancel" signal instead of SIGCHLD. Make sure we catch
6056 those (to prevent them from terminating GDB itself, which is
6057 likely to be their default action) and treat them the same way as
6060 action.sa_handler = sigchld_handler;
6061 sigemptyset (&action.sa_mask);
6062 action.sa_flags = SA_RESTART;
6063 sigaction (cancel, &action, NULL);
6065 /* We block the "cancel" signal throughout this code ... */
6066 sigaddset (&blocked_mask, cancel);
6067 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
6069 /* ... except during a sigsuspend. */
6070 sigdelset (&suspend_mask, cancel);