1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
29 #include <sys/syscall.h>
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-ptrace.h"
34 #include "linux-procfs.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
40 #include "inf-ptrace.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
52 #include "event-loop.h"
53 #include "event-top.h"
55 #include <sys/types.h>
56 #include "gdb_dirent.h"
57 #include "xml-support.h"
61 #include "linux-osdata.h"
62 #include "cli/cli-utils.h"
65 #define SPUFS_MAGIC 0x23c9b64e
68 #ifdef HAVE_PERSONALITY
69 # include <sys/personality.h>
70 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
71 # define ADDR_NO_RANDOMIZE 0x0040000
73 #endif /* HAVE_PERSONALITY */
75 /* This comment documents high-level logic of this file.
77 Waiting for events in sync mode
78 ===============================
80 When waiting for an event in a specific thread, we just use waitpid, passing
81 the specific pid, and not passing WNOHANG.
83 When waiting for an event in all threads, waitpid is not quite good. Prior to
84 version 2.4, Linux can either wait for event in main thread, or in secondary
85 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
86 miss an event. The solution is to use non-blocking waitpid, together with
87 sigsuspend. First, we use non-blocking waitpid to get an event in the main
88 process, if any. Second, we use non-blocking waitpid with the __WCLONE
89 flag to check for events in cloned processes. If nothing is found, we use
90 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
91 happened to a child process -- and SIGCHLD will be delivered both for events
92 in main debugged process and in cloned processes. As soon as we know there's
93 an event, we get back to calling nonblocking waitpid with and without
96 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
97 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
98 blocked, the signal becomes pending and sigsuspend immediately
99 notices it and returns.
101 Waiting for events in async mode
102 ================================
104 In async mode, GDB should always be ready to handle both user input
105 and target events, so neither blocking waitpid nor sigsuspend are
106 viable options. Instead, we should asynchronously notify the GDB main
107 event loop whenever there's an unprocessed event from the target. We
108 detect asynchronous target events by handling SIGCHLD signals. To
109 notify the event loop about target events, the self-pipe trick is used
110 --- a pipe is registered as waitable event source in the event loop,
111 the event loop select/poll's on the read end of this pipe (as well on
112 other event sources, e.g., stdin), and the SIGCHLD handler writes a
113 byte to this pipe. This is more portable than relying on
114 pselect/ppoll, since on kernels that lack those syscalls, libc
115 emulates them with select/poll+sigprocmask, and that is racy
116 (a.k.a. plain broken).
118 Obviously, if we fail to notify the event loop if there's a target
119 event, it's bad. OTOH, if we notify the event loop when there's no
120 event from the target, linux_nat_wait will detect that there's no real
121 event to report, and return event of type TARGET_WAITKIND_IGNORE.
122 This is mostly harmless, but it will waste time and is better avoided.
124 The main design point is that every time GDB is outside linux-nat.c,
125 we have a SIGCHLD handler installed that is called when something
126 happens to the target and notifies the GDB event loop. Whenever GDB
127 core decides to handle the event, and calls into linux-nat.c, we
128 process things as in sync mode, except that we never block in
131 While processing an event, we may end up momentarily blocked in
132 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
133 return quickly. E.g., in all-stop mode, before reporting to the core
134 that an LWP hit a breakpoint, all LWPs are stopped by sending them
135 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
136 Note that this is different from blocking indefinitely waiting for the
137 next event --- here, we're already handling an event.
142 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
143 signal is not entirely significant; we just need a signal to be delivered,
144 so that we can intercept it. SIGSTOP's advantage is that it can not be
145 blocked. A disadvantage is that it is not a real-time signal, so it can only
146 be queued once; we do not keep track of other sources of SIGSTOP.
148 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
149 use them, because they have special behavior when the signal is generated -
150 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
151 kills the entire thread group.
153 A delivered SIGSTOP would stop the entire thread group, not just the thread we
154 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
155 cancel it (by PTRACE_CONT without passing SIGSTOP).
157 We could use a real-time signal instead. This would solve those problems; we
158 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
159 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
160 generates it, and there are races with trying to find a signal that is not
164 #define O_LARGEFILE 0
167 /* Unlike other extended result codes, WSTOPSIG (status) on
168 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
169 instead SIGTRAP with bit 7 set. */
170 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
/* File-scope state: the underlying inf-ptrace target vector plus the
   architecture hooks that linux-nat clients (e.g. *-linux-nat.c files)
   register.  NOTE(review): some declaration lines are missing from this
   extraction (original line numbers jump).  */
172 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174 static struct target_ops *linux_ops;
175 static struct target_ops linux_ops_saved;
177 /* The method to call, if any, when a new thread is attached. */
178 static void (*linux_nat_new_thread) (struct lwp_info *);
180 /* Hook to call prior to resuming a thread. */
181 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
183 /* The method to call, if any, when the siginfo object needs to be
184 converted between the layout returned by ptrace, and the layout in
185 the architecture of the inferior. */
186 static int (*linux_nat_siginfo_fixup) (struct siginfo *,
190 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
191 Called by our to_xfer_partial. */
192 static LONGEST (*super_xfer_partial) (struct target_ops *,
194 const char *, gdb_byte *,
/* Debug flag for this module, and the "show debug lin-lwp" callback
   that prints its current value.  NOTE(review): the function's return
   type, braces and trailing argument are missing from this extraction.  */
198 static int debug_linux_nat;
200 show_debug_linux_nat (struct ui_file *file, int from_tty,
201 struct cmd_list_element *c, const char *value)
203 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
/* Singly-linked list node recording a stopped PID and its wait status.
   NOTE(review): the pid/status member lines are missing from this
   extraction; only the next pointer and the list head are visible.  */
207 struct simple_pid_list
211 struct simple_pid_list *next;
213 struct simple_pid_list *stopped_pids;
/* Lazily-probed ptrace capability flags (see the linux_test_for_* and
   linux_supports_* functions below), plus the self-pipe used to wake
   the event loop in async mode.  */
215 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
216 can not be used, 1 if it can. */
218 static int linux_supports_tracefork_flag = -1;
220 /* This variable is a tri-state flag: -1 for unknown, 0 if
221 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
223 static int linux_supports_tracesysgood_flag = -1;
225 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
226 PTRACE_O_TRACEVFORKDONE. */
228 static int linux_supports_tracevforkdone_flag = -1;
230 /* Stores the current used ptrace() options. */
231 static int current_ptrace_options = 0;
233 /* Async mode support. */
235 /* The read/write ends of the pipe registered as waitable file in the
237 static int linux_nat_event_pipe[2] = { -1, -1 };
239 /* Flush the event pipe. */
/* Drain the event pipe: keep reading single bytes until the pipe is
   empty, retrying on EINTR.  NOTE(review): return type, braces and
   local declarations are missing from this extraction.  */
242 async_file_flush (void)
249 ret = read (linux_nat_event_pipe[0], &buf, 1);
251 while (ret >= 0 || (ret == -1 && errno == EINTR));
254 /* Put something (anything, doesn't matter what, or how much) in event
255 pipe, so that the select/poll in the event-loop realizes we have
256 something to process. */
/* Write one byte into the event pipe so the event loop's select/poll
   wakes up.  Retries on EINTR; EAGAIN (pipe full) is deliberately
   ignored.  NOTE(review): return type, braces and locals are missing
   from this extraction.  */
259 async_file_mark (void)
263 /* It doesn't really matter what the pipe contains, as long we end
264 up with something in it. Might as well flush the previous
270 ret = write (linux_nat_event_pipe[1], "+", 1);
272 while (ret == -1 && errno == EINTR);
274 /* Ignore EAGAIN. If the pipe is full, the event loop will already
275 be awakened anyway. */
278 static void linux_nat_async (void (*callback)
279 (enum inferior_event_type event_type,
282 static int kill_lwp (int lwpid, int signo);
284 static int stop_callback (struct lwp_info *lp, void *data);
286 static void block_child_signals (sigset_t *prev_mask);
287 static void restore_child_signals_mask (sigset_t *prev_mask);
290 static struct lwp_info *add_lwp (ptid_t ptid);
291 static void purge_lwp_list (int pid);
292 static struct lwp_info *find_lwp_pid (ptid_t ptid);
295 /* Trivial list manipulation functions to keep track of a list of
296 new stopped processes. */
/* Prepend a new (pid, status) entry to *LISTP.  NOTE(review): the
   lines assigning new_pid->pid and re-linking *LISTP are missing from
   this extraction.  */
298 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
300 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
303 new_pid->status = status;
304 new_pid->next = *listp;
/* Linear search of LIST for PID; presumably returns non-zero when
   found (the comparison/return lines are missing from this
   extraction).  */
309 in_pid_list_p (struct simple_pid_list *list, int pid)
311 struct simple_pid_list *p;
313 for (p = list; p != NULL; p = p->next)
/* Find PID in *LISTP, copy its wait status to *STATUSP, and unlink the
   entry.  Walks pointer-to-pointer so the match can be spliced out in
   place.  NOTE(review): the unlink/free/return lines are missing from
   this extraction.  */
320 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
322 struct simple_pid_list **p;
324 for (p = listp; *p != NULL; p = &(*p)->next)
325 if ((*p)->pid == pid)
327 struct simple_pid_list *next = (*p)->next;
329 *statusp = (*p)->status;
338 /* A helper function for linux_test_for_tracefork, called after fork (). */
/* Runs in the forked child of linux_test_for_tracefork: ask to be
   traced, then stop ourselves so the parent can set ptrace options.  */
341 linux_tracefork_child (void)
343 ptrace (PTRACE_TRACEME, 0, 0, 0);
344 kill (getpid (), SIGSTOP);
349 /* Wrapper function for waitpid which handles EINTR. */
/* waitpid wrapper that retries on EINTR.  NOTE(review): return type,
   do/braces and the return statement are missing from this
   extraction.  */
352 my_waitpid (int pid, int *statusp, int flags)
358 ret = waitpid (pid, statusp, flags);
360 while (ret == -1 && errno == EINTR);
365 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
367 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
368 we know that the feature is not available. This may change the tracing
369 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
371 However, if it succeeds, we don't know for sure that the feature is
372 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
373 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
374 fork tracing, and let it fork. If the process exits, we assume that we
375 can't use TRACEFORK; if we get the fork notification, and we can extract
376 the new child's PID, then we assume that we can. */
/* Probe whether PTRACE_O_TRACEFORK (and, as a side effect,
   PTRACE_O_TRACEVFORKDONE) works: try the option on ORIGINAL_PID, then
   fork a throwaway child, set the option on it, let it fork, and see
   whether the PTRACE_EVENT_FORK notification arrives with a usable
   grandchild PID.  Sets linux_supports_tracefork_flag /
   linux_supports_tracevforkdone_flag.  SIGCHLD is blocked for the
   whole probe so the waitpid/ptrace dance is not interrupted.
   NOTE(review): many control-flow lines (fork call, if/else braces,
   early returns) are missing from this extraction.  */
379 linux_test_for_tracefork (int original_pid)
381 int child_pid, ret, status;
385 /* We don't want those ptrace calls to be interrupted. */
386 block_child_signals (&prev_mask);
388 linux_supports_tracefork_flag = 0;
389 linux_supports_tracevforkdone_flag = 0;
391 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
394 restore_child_signals_mask (&prev_mask);
400 perror_with_name (("fork"));
403 linux_tracefork_child ();
405 ret = my_waitpid (child_pid, &status, 0);
407 perror_with_name (("waitpid"));
408 else if (ret != child_pid)
409 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
410 if (! WIFSTOPPED (status))
411 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
/* First try to set the fork-tracing option on the stopped child; on
   failure the child is killed and reaped before giving up.  */
414 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
417 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
420 warning (_("linux_test_for_tracefork: failed to kill child"));
421 restore_child_signals_mask (&prev_mask);
425 ret = my_waitpid (child_pid, &status, 0);
426 if (ret != child_pid)
427 warning (_("linux_test_for_tracefork: failed "
428 "to wait for killed child"));
429 else if (!WIFSIGNALED (status))
430 warning (_("linux_test_for_tracefork: unexpected "
431 "wait status 0x%x from killed child"), status);
433 restore_child_signals_mask (&prev_mask);
437 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
438 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
439 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
440 linux_supports_tracevforkdone_flag = (ret == 0);
442 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
444 warning (_("linux_test_for_tracefork: failed to resume child"));
446 ret = my_waitpid (child_pid, &status, 0);
/* A PTRACE_EVENT_FORK stop whose event message yields a non-zero
   grandchild PID proves fork tracing works; the grandchild is then
   killed and reaped.  */
448 if (ret == child_pid && WIFSTOPPED (status)
449 && status >> 16 == PTRACE_EVENT_FORK)
452 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
453 if (ret == 0 && second_pid != 0)
457 linux_supports_tracefork_flag = 1;
458 my_waitpid (second_pid, &second_status, 0);
459 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
461 warning (_("linux_test_for_tracefork: "
462 "failed to kill second child"));
463 my_waitpid (second_pid, &status, 0);
467 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
468 "(%d, status 0x%x)"), ret, status);
470 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
472 warning (_("linux_test_for_tracefork: failed to kill child"));
473 my_waitpid (child_pid, &status, 0);
475 restore_child_signals_mask (&prev_mask);
478 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
480 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
481 we know that the feature is not available. This may change the tracing
482 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
/* Probe PTRACE_O_TRACESYSGOOD on ORIGINAL_PID and record the result in
   linux_supports_tracesysgood_flag.  NOTE(review): the test of RET
   between the ptrace call and the flag assignment is missing from this
   extraction.  */
485 linux_test_for_tracesysgood (int original_pid)
490 /* We don't want those ptrace calls to be interrupted. */
491 block_child_signals (&prev_mask);
493 linux_supports_tracesysgood_flag = 0;
495 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
499 linux_supports_tracesysgood_flag = 1;
501 restore_child_signals_mask (&prev_mask);
504 /* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
505 This function also sets linux_supports_tracesysgood_flag. */
/* Memoized accessor: run the PTRACE_O_TRACESYSGOOD probe on first use,
   then return the cached tri-state flag (0 or 1 after probing).  */
508 linux_supports_tracesysgood (int pid)
510 if (linux_supports_tracesysgood_flag == -1)
511 linux_test_for_tracesysgood (pid);
512 return linux_supports_tracesysgood_flag;
515 /* Return non-zero iff we have tracefork functionality available.
516 This function also sets linux_supports_tracefork_flag. */
/* Memoized accessor: run the PTRACE_O_TRACEFORK probe on first use,
   then return the cached tri-state flag (0 or 1 after probing).  */
519 linux_supports_tracefork (int pid)
521 if (linux_supports_tracefork_flag == -1)
522 linux_test_for_tracefork (pid);
523 return linux_supports_tracefork_flag;
/* Memoized accessor for PTRACE_O_TRACEVFORKDONE.  Note it keys the
   probe off the TRACEFORK flag: the single linux_test_for_tracefork
   run determines both capabilities.  */
527 linux_supports_tracevforkdone (int pid)
529 if (linux_supports_tracefork_flag == -1)
530 linux_test_for_tracefork (pid);
531 return linux_supports_tracevforkdone_flag;
/* Turn on PTRACE_O_TRACESYSGOOD for PTID (falling back from the LWP id
   to the process id) if the kernel supports it, accumulating into
   current_ptrace_options.  NOTE(review): the zero-lwp check between
   the two pid assignments and an early return are missing from this
   extraction.  */
535 linux_enable_tracesysgood (ptid_t ptid)
537 int pid = ptid_get_lwp (ptid);
540 pid = ptid_get_pid (ptid);
542 if (linux_supports_tracesysgood (pid) == 0)
545 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
547 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
/* Enable fork/vfork/exec/clone event reporting for PTID via
   PTRACE_SETOPTIONS, plus VFORKDONE when available.  Accumulates into
   current_ptrace_options.  NOTE(review): the zero-lwp check and an
   early return are missing from this extraction.  */
552 linux_enable_event_reporting (ptid_t ptid)
554 int pid = ptid_get_lwp (ptid);
557 pid = ptid_get_pid (ptid);
559 if (! linux_supports_tracefork (pid))
562 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
563 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
565 if (linux_supports_tracevforkdone (pid))
566 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
568 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
569 read-only process state. */
571 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
/* Target hook run after attaching to PID: switch on extended ptrace
   event reporting and syscall tracing for the new inferior.  */
575 linux_child_post_attach (int pid)
577 linux_enable_event_reporting (pid_to_ptid (pid));
578 linux_enable_tracesysgood (pid_to_ptid (pid));
/* Target hook run after spawning a new inferior ("run"): same setup as
   the post-attach hook, but PTID is already in hand.  */
582 linux_child_post_startup_inferior (ptid_t ptid)
584 linux_enable_event_reporting (ptid);
585 linux_enable_tracesysgood (ptid);
/* Target hook invoked when the inferior forks or vforks and GDB must
   decide which side to keep debugging.  FOLLOW_CHILD selects the
   branch.  Broad structure: (1) refuse the foreground-vfork-hold
   combination that would hang the session; (2) follow-parent path —
   optionally detach the child, or add it as a second inferior, and for
   vfork arrange to wait for VFORK_DONE (or fake one when the kernel
   lacks PTRACE_O_TRACEVFORKDONE); (3) follow-child path — add the
   child inferior, keep or detach the parent, and share or clone the
   program/address spaces depending on vfork/detach_fork.
   NOTE(review): this extraction is missing many lines (braces,
   else-branches, some calls), so read the structure below as
   fragmentary.  */
589 linux_child_follow_fork (struct target_ops *ops, int follow_child)
593 int parent_pid, child_pid;
595 block_child_signals (&prev_mask);
597 has_vforked = (inferior_thread ()->pending_follow.kind
598 == TARGET_WAITKIND_VFORKED);
599 parent_pid = ptid_get_lwp (inferior_ptid);
601 parent_pid = ptid_get_pid (inferior_ptid);
602 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
605 linux_enable_event_reporting (pid_to_ptid (child_pid));
608 && !non_stop /* Non-stop always resumes both branches. */
609 && (!target_is_async_p () || sync_execution)
610 && !(follow_child || detach_fork || sched_multi))
612 /* The parent stays blocked inside the vfork syscall until the
613 child execs or exits. If we don't let the child run, then
614 the parent stays blocked. If we're telling the parent to run
615 in the foreground, the user will not be able to ctrl-c to get
616 back the terminal, effectively hanging the debug session. */
617 fprintf_filtered (gdb_stderr, _("\
618 Can not resume the parent process over vfork in the foreground while\n\
619 holding the child stopped. Try \"set detach-on-fork\" or \
620 \"set schedule-multiple\".\n"));
621 /* FIXME output string > 80 columns. */
/* --- Follow-parent path: deal with the child (detach or keep as a
   second inferior).  */
627 struct lwp_info *child_lp = NULL;
629 /* We're already attached to the parent, by default. */
631 /* Detach new forked process? */
634 /* Before detaching from the child, remove all breakpoints
635 from it. If we forked, then this has already been taken
636 care of by infrun.c. If we vforked however, any
637 breakpoint inserted in the parent is visible in the
638 child, even those added while stopped in a vfork
639 catchpoint. This will remove the breakpoints from the
640 parent also, but they'll be reinserted below. */
643 /* keep breakpoints list in sync. */
644 remove_breakpoints_pid (GET_PID (inferior_ptid));
647 if (info_verbose || debug_linux_nat)
649 target_terminal_ours ();
650 fprintf_filtered (gdb_stdlog,
651 "Detaching after fork from "
652 "child process %d.\n",
656 ptrace (PTRACE_DETACH, child_pid, 0, 0);
660 struct inferior *parent_inf, *child_inf;
661 struct cleanup *old_chain;
663 /* Add process to GDB's tables. */
664 child_inf = add_inferior (child_pid);
666 parent_inf = current_inferior ();
667 child_inf->attach_flag = parent_inf->attach_flag;
668 copy_terminal_info (child_inf, parent_inf);
670 old_chain = save_inferior_ptid ();
671 save_current_program_space ();
673 inferior_ptid = ptid_build (child_pid, child_pid, 0);
674 add_thread (inferior_ptid);
675 child_lp = add_lwp (inferior_ptid);
676 child_lp->stopped = 1;
677 child_lp->last_resume_kind = resume_stop;
679 /* If this is a vfork child, then the address-space is
680 shared with the parent. */
683 child_inf->pspace = parent_inf->pspace;
684 child_inf->aspace = parent_inf->aspace;
686 /* The parent will be frozen until the child is done
687 with the shared region. Keep track of the
689 child_inf->vfork_parent = parent_inf;
690 child_inf->pending_detach = 0;
691 parent_inf->vfork_child = child_inf;
692 parent_inf->pending_detach = 0;
696 child_inf->aspace = new_address_space ();
697 child_inf->pspace = add_program_space (child_inf->aspace);
698 child_inf->removable = 1;
699 set_current_program_space (child_inf->pspace);
700 clone_program_space (child_inf->pspace, parent_inf->pspace);
702 /* Let the shared library layer (solib-svr4) learn about
703 this new process, relocate the cloned exec, pull in
704 shared libraries, and install the solib event
705 breakpoint. If a "cloned-VM" event was propagated
706 better throughout the core, this wouldn't be
708 solib_create_inferior_hook (0);
711 /* Let the thread_db layer learn about this new process. */
712 check_for_thread_db ();
714 do_cleanups (old_chain);
/* --- Follow-parent, vforked case: the parent is frozen until the
   child execs/exits, so wait for (or fake) VFORK_DONE.  */
719 struct lwp_info *parent_lp;
720 struct inferior *parent_inf;
722 parent_inf = current_inferior ();
724 /* If we detached from the child, then we have to be careful
725 to not insert breakpoints in the parent until the child
726 is done with the shared memory region. However, if we're
727 staying attached to the child, then we can and should
728 insert breakpoints, so that we can debug it. A
729 subsequent child exec or exit is enough to know when does
730 the child stops using the parent's address space. */
731 parent_inf->waiting_for_vfork_done = detach_fork;
732 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
734 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
735 gdb_assert (linux_supports_tracefork_flag >= 0);
737 if (linux_supports_tracevforkdone (0))
740 fprintf_unfiltered (gdb_stdlog,
741 "LCFF: waiting for VFORK_DONE on %d\n",
743 parent_lp->stopped = 1;
745 /* We'll handle the VFORK_DONE event like any other
746 event, in target_wait. */
750 /* We can't insert breakpoints until the child has
751 finished with the shared memory region. We need to
752 wait until that happens. Ideal would be to just
754 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
755 - waitpid (parent_pid, &status, __WALL);
756 However, most architectures can't handle a syscall
757 being traced on the way out if it wasn't traced on
760 We might also think to loop, continuing the child
761 until it exits or gets a SIGTRAP. One problem is
762 that the child might call ptrace with PTRACE_TRACEME.
764 There's no simple and reliable way to figure out when
765 the vforked child will be done with its copy of the
766 shared memory. We could step it out of the syscall,
767 two instructions, let it go, and then single-step the
768 parent once. When we have hardware single-step, this
769 would work; with software single-step it could still
770 be made to work but we'd have to be able to insert
771 single-step breakpoints in the child, and we'd have
772 to insert -just- the single-step breakpoint in the
773 parent. Very awkward.
775 In the end, the best we can do is to make sure it
776 runs for a little while. Hopefully it will be out of
777 range of any breakpoints we reinsert. Usually this
778 is only the single-step breakpoint at vfork's return
782 fprintf_unfiltered (gdb_stdlog,
783 "LCFF: no VFORK_DONE "
784 "support, sleeping a bit\n");
788 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
789 and leave it pending. The next linux_nat_resume call
790 will notice a pending event, and bypasses actually
791 resuming the inferior. */
792 parent_lp->status = 0;
793 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
794 parent_lp->stopped = 1;
796 /* If we're in async mode, need to tell the event loop
797 there's something here to process. */
798 if (target_can_async_p ())
/* --- Follow-child path: add the child inferior and dispose of the
   parent (hold it across a vfork, or detach it).  */
805 struct inferior *parent_inf, *child_inf;
806 struct lwp_info *child_lp;
807 struct program_space *parent_pspace;
809 if (info_verbose || debug_linux_nat)
811 target_terminal_ours ();
813 fprintf_filtered (gdb_stdlog,
814 _("Attaching after process %d "
815 "vfork to child process %d.\n"),
816 parent_pid, child_pid);
818 fprintf_filtered (gdb_stdlog,
819 _("Attaching after process %d "
820 "fork to child process %d.\n"),
821 parent_pid, child_pid);
824 /* Add the new inferior first, so that the target_detach below
825 doesn't unpush the target. */
827 child_inf = add_inferior (child_pid);
829 parent_inf = current_inferior ();
830 child_inf->attach_flag = parent_inf->attach_flag;
831 copy_terminal_info (child_inf, parent_inf);
833 parent_pspace = parent_inf->pspace;
835 /* If we're vforking, we want to hold on to the parent until the
836 child exits or execs. At child exec or exit time we can
837 remove the old breakpoints from the parent and detach or
838 resume debugging it. Otherwise, detach the parent now; we'll
839 want to reuse it's program/address spaces, but we can't set
840 them to the child before removing breakpoints from the
841 parent, otherwise, the breakpoints module could decide to
842 remove breakpoints from the wrong process (since they'd be
843 assigned to the same address space). */
847 gdb_assert (child_inf->vfork_parent == NULL);
848 gdb_assert (parent_inf->vfork_child == NULL);
849 child_inf->vfork_parent = parent_inf;
850 child_inf->pending_detach = 0;
851 parent_inf->vfork_child = child_inf;
852 parent_inf->pending_detach = detach_fork;
853 parent_inf->waiting_for_vfork_done = 0;
855 else if (detach_fork)
856 target_detach (NULL, 0);
858 /* Note that the detach above makes PARENT_INF dangling. */
860 /* Add the child thread to the appropriate lists, and switch to
861 this new thread, before cloning the program space, and
862 informing the solib layer about this new process. */
864 inferior_ptid = ptid_build (child_pid, child_pid, 0);
865 add_thread (inferior_ptid);
866 child_lp = add_lwp (inferior_ptid);
867 child_lp->stopped = 1;
868 child_lp->last_resume_kind = resume_stop;
870 /* If this is a vfork child, then the address-space is shared
871 with the parent. If we detached from the parent, then we can
872 reuse the parent's program/address spaces. */
873 if (has_vforked || detach_fork)
875 child_inf->pspace = parent_pspace;
876 child_inf->aspace = child_inf->pspace->aspace;
880 child_inf->aspace = new_address_space ();
881 child_inf->pspace = add_program_space (child_inf->aspace);
882 child_inf->removable = 1;
883 set_current_program_space (child_inf->pspace);
884 clone_program_space (child_inf->pspace, parent_pspace);
886 /* Let the shared library layer (solib-svr4) learn about
887 this new process, relocate the cloned exec, pull in
888 shared libraries, and install the solib event breakpoint.
889 If a "cloned-VM" event was propagated better throughout
890 the core, this wouldn't be required. */
891 solib_create_inferior_hook (0);
894 /* Let the thread_db layer learn about this new process. */
895 check_for_thread_db ();
898 restore_child_signals_mask (&prev_mask);
/* fork/vfork/exec catchpoint target hooks.  The insert variants return
   0 (success) when the kernel supports PTRACE_O_TRACEFORK, non-zero
   otherwise; removal is a no-op at the ptrace level.  NOTE(review):
   the return types, braces, and the remove-functions' bodies are
   missing from this extraction.  */
904 linux_child_insert_fork_catchpoint (int pid)
906 return !linux_supports_tracefork (pid);
910 linux_child_remove_fork_catchpoint (int pid)
916 linux_child_insert_vfork_catchpoint (int pid)
918 return !linux_supports_tracefork (pid);
922 linux_child_remove_vfork_catchpoint (int pid)
928 linux_child_insert_exec_catchpoint (int pid)
930 return !linux_supports_tracefork (pid);
934 linux_child_remove_exec_catchpoint (int pid)
/* Target hook for syscall catchpoints.  Only checks that
   PTRACE_O_TRACESYSGOOD works; the NEEDED/ANY_COUNT/TABLE arguments
   are deliberately ignored (syscall filtering is done by GDB core).
   NOTE(review): the return statements are missing from this
   extraction.  */
940 linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
941 int table_size, int *table)
943 if (!linux_supports_tracesysgood (pid))
946 /* On GNU/Linux, we ignore the arguments. It means that we only
947 enable the syscall catchpoints, but do not disable them.
949 Also, we do not use the `table' information because we do not
950 filter system calls here. We let GDB do the logic for us. */
954 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
955 are processes sharing the same VM space. A multi-threaded process
956 is basically a group of such processes. However, such a grouping
957 is almost entirely a user-space issue; the kernel doesn't enforce
958 such a grouping at all (this might change in the future). In
959 general, we'll rely on the threads library (i.e. the GNU/Linux
960 Threads library) to provide such a grouping.
962 It is perfectly well possible to write a multi-threaded application
963 without the assistance of a threads library, by using the clone
964 system call directly. This module should be able to give some
965 rudimentary support for debugging such applications if developers
966 specify the CLONE_PTRACE flag in the clone system call, and are
967 using the Linux kernel 2.4 or above.
969 Note that there are some peculiarities in GNU/Linux that affect
972 - In general one should specify the __WCLONE flag to waitpid in
973 order to make it report events for any of the cloned processes
974 (and leave it out for the initial process). However, if a cloned
975 process has exited the exit status is only reported if the
976 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
977 we cannot use it since GDB must work on older systems too.
979 - When a traced, cloned process exits and is waited for by the
980 debugger, the kernel reassigns it to the original parent and
981 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
982 library doesn't notice this, which leads to the "zombie problem":
983 When debugged, a multi-threaded process that spawns a lot of
984 threads will run out of processes, even if the threads exit,
985 because the "zombies" stay around. */
/* File-scope LWP and signal-handling state shared by the sync and
   async wait paths below.  */
987 /* List of known LWPs. */
988 struct lwp_info *lwp_list;
991 /* Original signal mask. */
992 static sigset_t normal_mask;
994 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
995 _initialize_linux_nat. */
996 static sigset_t suspend_mask;
998 /* Signals to block to make that sigsuspend work. */
999 static sigset_t blocked_mask;
1001 /* SIGCHLD action. */
1002 struct sigaction sigchld_action;
1004 /* Block child signals (SIGCHLD and linux threads signals), and store
1005 the previous mask in PREV_MASK. */
/* Block SIGCHLD (adding it to blocked_mask on first use) and save the
   previous signal mask into *PREV_MASK for later restoration by
   restore_child_signals_mask.  */
1008 block_child_signals (sigset_t *prev_mask)
1010 /* Make sure SIGCHLD is blocked. */
1011 if (!sigismember (&blocked_mask, SIGCHLD))
1012 sigaddset (&blocked_mask, SIGCHLD);
1014 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1017 /* Restore child signals mask, previously returned by
1018 block_child_signals. */
/* Restore the signal mask saved by block_child_signals.  */
1021 restore_child_signals_mask (sigset_t *prev_mask)
1023 sigprocmask (SIG_SETMASK, prev_mask, NULL);
/* Rebuild pass_mask from the PASS_SIGNALS array: for every host signal
   whose target-signal translation is flagged, add it to the set of
   signals delivered straight to the inferior.  NOTE(review): the
   function's return type, braces and the declaration of SIGNO are
   missing from this extraction.  */
1026 /* Mask of signals to pass directly to the inferior. */
1027 static sigset_t pass_mask;
1029 /* Update signals to pass to the inferior. */
1031 linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1035 sigemptyset (&pass_mask);
1037 for (signo = 1; signo < NSIG; signo++)
1039 int target_signo = target_signal_from_host (signo);
1040 if (target_signo < numsigs && pass_signals[target_signo])
1041 sigaddset (&pass_mask, signo);
1047 /* Prototypes for local functions. */
1048 static int stop_wait_callback (struct lwp_info *lp, void *data);
1049 static int linux_thread_alive (ptid_t ptid);
1050 static char *linux_child_pid_to_exec_file (int pid);
1053 /* Convert wait status STATUS to a string. Used for printing debug
/* Render a waitpid status as a human-readable string for debug output.
   Distinguishes syscall stops (SYSCALL_SIGTRAP, i.e. SIGTRAP|0x80 from
   PTRACE_O_TRACESYSGOOD), ordinary signal stops, terminations, and
   exits.  Returns a pointer to a static buffer, so it is not
   reentrant.  NOTE(review): return type, braces and the return
   statement are missing from this extraction.  */
1057 status_to_str (int status)
1059 static char buf[64];
1061 if (WIFSTOPPED (status))
1063 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1064 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1065 strsignal (SIGTRAP));
1067 snprintf (buf, sizeof (buf), "%s (stopped)",
1068 strsignal (WSTOPSIG (status)));
1070 else if (WIFSIGNALED (status))
1071 snprintf (buf, sizeof (buf), "%s (terminated)",
1072 strsignal (WTERMSIG (status)));
1074 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1079 /* Destroy and free LP. */
/* Free LP's arch-specific data.  NOTE(review): the line freeing LP
   itself appears to be missing from this extraction.  */
1082 lwp_free (struct lwp_info *lp)
1084 xfree (lp->arch_private);
1088 /* Remove all LWPs belong to PID from the lwp list. */
/* Unlink (and presumably free) every LWP whose ptid belongs to process
   PID, keeping lwp_list consistent whether the match is at the head
   (lwp_list = lp->next) or mid-list (lpprev->next = lp->next).
   NOTE(review): the lpnext assignment, head/mid-list branch structure,
   and the lwp_free call are missing from this extraction.  */
1091 purge_lwp_list (int pid)
1093 struct lwp_info *lp, *lpprev, *lpnext;
1097 for (lp = lwp_list; lp; lp = lpnext)
1101 if (ptid_get_pid (lp->ptid) == pid)
1104 lwp_list = lp->next;
1106 lpprev->next = lp->next;
1115 /* Return the number of known LWPs in the tgid given by PID. */
/* Body fragment of num_lwps (per the comment above): counts lwp_list
   entries whose ptid matches PID.  NOTE(review): the signature line,
   counter, and return are missing from this extraction.  */
1121 struct lwp_info *lp;
1123 for (lp = lwp_list; lp; lp = lp->next)
1124 if (ptid_get_pid (lp->ptid) == pid)
1130 /* Add the LWP specified by PID to the list. Return a pointer to the
1131 structure describing the new LWP. The LWP should already be stopped
1132 (with an exception for the very first LWP). */
1134 static struct lwp_info *
1135 add_lwp (ptid_t ptid)
1137 struct lwp_info *lp;
1139 gdb_assert (is_lwp (ptid));
/* Allocate and zero-initialize the new lwp_info record.  */
1141 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1143 memset (lp, 0, sizeof (struct lwp_info));
1145 lp->last_resume_kind = resume_continue;
1146 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
/* Push onto the head of the global lwp_list.  */
1151 lp->next = lwp_list;
1154 /* Let the arch specific bits know about this new thread. Current
1155 clients of this callback take the opportunity to install
1156 watchpoints in the new thread. Don't do this for the first
1157 thread though. If we're spawning a child ("run"), the thread
1158 executes the shell wrapper first, and we shouldn't touch it until
1159 it execs the program we want to debug. For "attach", it'd be
1160 okay to call the callback, but it's not necessary, because
1161 watchpoints can't yet have been inserted into the inferior. */
1162 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
1163 linux_nat_new_thread (lp);
1168 /* Remove the LWP specified by PID from the list. */
1171 delete_lwp (ptid_t ptid)
1173 struct lwp_info *lp, *lpprev;
/* Linear search for the exact ptid, tracking the predecessor so the
   node can be spliced out.  (Lines unlinking/freeing in between are
   missing from this extract.)  */
1177 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1178 if (ptid_equal (lp->ptid, ptid))
1185 lpprev->next = lp->next;
1187 lwp_list = lp->next;
1192 /* Return a pointer to the structure describing the LWP corresponding
1193 to PID. If no corresponding LWP could be found, return NULL. */
1195 static struct lwp_info *
1196 find_lwp_pid (ptid_t ptid)
1198 struct lwp_info *lp;
/* Accept either an lwp-style ptid or a plain pid ptid: take the lwp
   field when present, otherwise fall back to the pid.  (The `is_lwp`
   test selecting between these two lines is missing here.)  */
1202 lwp = GET_LWP (ptid);
1204 lwp = GET_PID (ptid);
1206 for (lp = lwp_list; lp; lp = lp->next)
1207 if (lwp == GET_LWP (lp->ptid))
1213 /* Call CALLBACK with its second argument set to DATA for every LWP in
1214 the list. If CALLBACK returns 1 for a particular LWP, return a
1215 pointer to the structure describing that LWP immediately.
1216 Otherwise return NULL. */
1219 iterate_over_lwps (ptid_t filter,
1220 int (*callback) (struct lwp_info *, void *),
1223 struct lwp_info *lp, *lpnext;
/* LPNEXT is captured before invoking CALLBACK (capture line missing
   from extract) because the callback may delete LP from the list.  */
1225 for (lp = lwp_list; lp; lp = lpnext)
/* FILTER uses ptid_match semantics, so a pid-only or minus-one ptid
   selects whole groups of LWPs.  */
1229 if (ptid_match (lp->ptid, filter))
1231 if ((*callback) (lp, data))
1239 /* Update our internal state when changing from one checkpoint to
1240 another indicated by NEW_PTID. We can only switch single-threaded
1241 applications, so we only create one new LWP, and the previous list
1245 linux_nat_switch_fork (ptid_t new_ptid)
1247 struct lwp_info *lp;
/* Drop every LWP of the current inferior, then register the single
   LWP of the checkpoint we are switching to.  */
1249 purge_lwp_list (GET_PID (inferior_ptid));
1251 lp = add_lwp (new_ptid);
1254 /* This changes the thread's ptid while preserving the gdb thread
1255 num. Also changes the inferior pid, while preserving the
1257 thread_change_ptid (inferior_ptid, new_ptid);
1259 /* We've just told GDB core that the thread changed target id, but,
1260 in fact, it really is a different thread, with different register
1262 registers_changed ();
1265 /* Handle the exit of a single thread LP. */
1268 exit_lwp (struct lwp_info *lp)
1270 struct thread_info *th = find_thread_ptid (lp->ptid);
/* If GDB core knows this thread, announce and delete it; then drop
   our own lwp record.  (The `if (th)` guard around the next lines is
   implied by TH being checked — lines missing from extract.)  */
1274 if (print_thread_events)
1275 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1277 delete_thread (lp->ptid);
1280 delete_lwp (lp->ptid);
1283 /* Detect `T (stopped)' in `/proc/PID/status'.
1284 Other states including `T (tracing stop)' are reported as false. */
1287 pid_is_stopped (pid_t pid)
/* Parse /proc/PID/status line by line looking for the "State:" field;
   only a job-control stop ("T (stopped)") counts, distinguishing it
   from a ptrace stop ("T (tracing stop)").  */
1293 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1294 status_file = fopen (buf, "r");
1295 if (status_file != NULL)
1299 while (fgets (buf, sizeof (buf), status_file))
1301 if (strncmp (buf, "State:", 6) == 0)
1307 if (have_state && strstr (buf, "T (stopped)") != NULL)
1309 fclose (status_file);
1314 /* Wait for the LWP specified by LP, which we have just attached to.
1315 Returns a wait status for that LWP, to cache. */
1318 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1321 pid_t new_pid, pid = GET_LWP (ptid);
1324 if (pid_is_stopped (pid))
1326 if (debug_linux_nat)
1327 fprintf_unfiltered (gdb_stdlog,
1328 "LNPAW: Attaching to a stopped process\n");
1330 /* The process is definitely stopped. It is in a job control
1331 stop, unless the kernel predates the TASK_STOPPED /
1332 TASK_TRACED distinction, in which case it might be in a
1333 ptrace stop. Make sure it is in a ptrace stop; from there we
1334 can kill it, signal it, et cetera.
1336 First make sure there is a pending SIGSTOP. Since we are
1337 already attached, the process can not transition from stopped
1338 to running without a PTRACE_CONT; so we know this signal will
1339 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1340 probably already in the queue (unless this kernel is old
1341 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1342 is not an RT signal, it can only be queued once. */
1343 kill_lwp (pid, SIGSTOP);
1345 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1346 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1347 ptrace (PTRACE_CONT, pid, 0, 0);
1350 /* Make sure the initial process is stopped. The user-level threads
1351 layer might want to poke around in the inferior, and that won't
1352 work if things haven't stabilized yet. */
1353 new_pid = my_waitpid (pid, &status, 0);
1354 if (new_pid == -1 && errno == ECHILD)
/* waitpid with no flags only sees non-clone children; ECHILD here
   means PID is a clone (thread), so retry with __WCLONE and record
   that fact for the caller via *CLONED.  */
1357 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1359 /* Try again with __WCLONE to check cloned processes. */
1360 new_pid = my_waitpid (pid, &status, __WCLONE);
1364 gdb_assert (pid == new_pid);
1366 if (!WIFSTOPPED (status))
1368 /* The pid we tried to attach has apparently just exited. */
1369 if (debug_linux_nat)
1370 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1371 pid, status_to_str (status));
/* Cached STATUS is returned to the caller (return lines missing from
   this extract); FIRST and the SIGNALLED out-parameter handling are
   also partly elided.  */
1375 if (WSTOPSIG (status) != SIGSTOP)
1378 if (debug_linux_nat)
1379 fprintf_unfiltered (gdb_stdlog,
1380 "LNPAW: Received %s after attaching\n",
1381 status_to_str (status));
1387 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1388 the new LWP could not be attached, or 1 if we're already auto
1389 attached to this thread, but haven't processed the
1390 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1391 its existance, without considering it an error. */
1394 lin_lwp_attach_lwp (ptid_t ptid)
1396 struct lwp_info *lp;
1400 gdb_assert (is_lwp (ptid));
/* SIGCHLD must stay blocked throughout; every exit path below restores
   the saved mask before returning.  */
1402 block_child_signals (&prev_mask);
1404 lp = find_lwp_pid (ptid);
1405 lwpid = GET_LWP (ptid);
1407 /* We assume that we're already attached to any LWP that has an id
1408 equal to the overall process id, and to any LWP that is already
1409 in our list of LWPs. If we're not seeing exit events from threads
1410 and we've had PID wraparound since we last tried to stop all threads,
1411 this assumption might be wrong; fortunately, this is very unlikely
1413 if (lwpid != GET_PID (ptid) && lp == NULL)
1415 int status, cloned = 0, signalled = 0;
1417 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1419 if (linux_supports_tracefork_flag)
1421 /* If we haven't stopped all threads when we get here,
1422 we may have seen a thread listed in thread_db's list,
1423 but not processed the PTRACE_EVENT_CLONE yet. If
1424 that's the case, ignore this new thread, and let
1425 normal event handling discover it later. */
1426 if (in_pid_list_p (stopped_pids, lwpid))
1428 /* We've already seen this thread stop, but we
1429 haven't seen the PTRACE_EVENT_CLONE extended
1431 restore_child_signals_mask (&prev_mask);
1439 /* See if we've got a stop for this new child
1440 pending. If so, we're already attached. */
1441 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1442 if (new_pid == -1 && errno == ECHILD)
1443 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1446 if (WIFSTOPPED (status))
1447 add_to_pid_list (&stopped_pids, lwpid, status);
1449 restore_child_signals_mask (&prev_mask);
1455 /* If we fail to attach to the thread, issue a warning,
1456 but continue. One way this can happen is if thread
1457 creation is interrupted; as of Linux kernel 2.6.19, a
1458 bug may place threads in the thread list and then fail
1460 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1461 safe_strerror (errno));
1462 restore_child_signals_mask (&prev_mask);
1466 if (debug_linux_nat)
1467 fprintf_unfiltered (gdb_stdlog,
1468 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1469 target_pid_to_str (ptid));
/* PTRACE_ATTACH succeeded: collect the initial stop for the LWP and
   cache the details on a fresh lwp_info.  */
1471 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1472 if (!WIFSTOPPED (status))
1474 restore_child_signals_mask (&prev_mask);
1478 lp = add_lwp (ptid);
1480 lp->cloned = cloned;
1481 lp->signalled = signalled;
/* If the first stop was something other than the expected SIGSTOP,
   save the status so the signal is reported/redelivered later.  */
1482 if (WSTOPSIG (status) != SIGSTOP)
1485 lp->status = status;
1488 target_post_attach (GET_LWP (lp->ptid));
1490 if (debug_linux_nat)
1492 fprintf_unfiltered (gdb_stdlog,
1493 "LLAL: waitpid %s received %s\n",
1494 target_pid_to_str (ptid),
1495 status_to_str (status));
1500 /* We assume that the LWP representing the original process is
1501 already stopped. Mark it as stopped in the data structure
1502 that the GNU/linux ptrace layer uses to keep track of
1503 threads. Note that this won't have already been done since
1504 the main thread will have, we assume, been stopped by an
1505 attach from a different layer. */
1507 lp = add_lwp (ptid)
1511 lp->last_resume_kind = resume_stop;
1512 restore_child_signals_mask (&prev_mask);
1517 linux_nat_create_inferior (struct target_ops *ops,
1518 char *exec_file, char *allargs, char **env,
/* Start a new inferior.  Wraps the lower layer's to_create_inferior,
   optionally disabling address-space randomization around the fork
   when `set disable-randomization on` is in effect.  */
1521 #ifdef HAVE_PERSONALITY
1522 int personality_orig = 0, personality_set = 0;
1523 #endif /* HAVE_PERSONALITY */
1525 /* The fork_child mechanism is synchronous and calls target_wait, so
1526 we have to mask the async mode. */
1528 #ifdef HAVE_PERSONALITY
1529 if (disable_randomization)
/* personality(0xffffffff) queries the current persona without
   changing it; OR in ADDR_NO_RANDOMIZE only if not already set, and
   remember to restore afterwards.  */
1532 personality_orig = personality (0xffffffff);
1533 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1535 personality_set = 1;
1536 personality (personality_orig | ADDR_NO_RANDOMIZE);
1538 if (errno != 0 || (personality_set
1539 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1540 warning (_("Error disabling address space randomization: %s"),
1541 safe_strerror (errno));
1543 #endif /* HAVE_PERSONALITY */
1545 /* Make sure we report all signals during startup. */
1546 linux_nat_pass_signals (0, NULL);
1548 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1550 #ifdef HAVE_PERSONALITY
/* Undo the persona change in the *debugger* process after the child
   has been spawned.  */
1551 if (personality_set)
1554 personality (personality_orig);
1556 warning (_("Error restoring address space randomization: %s"),
1557 safe_strerror (errno));
1559 #endif /* HAVE_PERSONALITY */
1563 linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
/* Attach to a running process.  Delegates the PTRACE_ATTACH to the
   lower layer, then fixes up the main thread's ptid to lwp form,
   records it in the lwp list, and caches its initial stop status.  */
1565 struct lwp_info *lp;
1569 /* Make sure we report all signals during attach. */
1570 linux_nat_pass_signals (0, NULL);
1572 linux_ops->to_attach (ops, args, from_tty);
1574 /* The ptrace base target adds the main thread with (pid,0,0)
1575 format. Decorate it with lwp info. */
1576 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1577 thread_change_ptid (inferior_ptid, ptid);
1579 /* Add the initial process as the first LWP to the list. */
1580 lp = add_lwp (ptid);
1582 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1584 if (!WIFSTOPPED (status))
/* The attach raced with process death: report exactly how it died
   and mourn, rather than leaving a half-attached inferior.  */
1586 if (WIFEXITED (status))
1588 int exit_code = WEXITSTATUS (status);
1590 target_terminal_ours ();
1591 target_mourn_inferior ();
1593 error (_("Unable to attach: program exited normally."));
1595 error (_("Unable to attach: program exited with code %d."),
1598 else if (WIFSIGNALED (status))
1600 enum target_signal signo;
1602 target_terminal_ours ();
1603 target_mourn_inferior ();
1605 signo = target_signal_from_host (WTERMSIG (status));
1606 error (_("Unable to attach: program terminated with signal "
1608 target_signal_to_name (signo),
1609 target_signal_to_string (signo));
1612 internal_error (__FILE__, __LINE__,
1613 _("unexpected status %d for PID %ld"),
1614 status, (long) GET_LWP (ptid));
1619 /* Save the wait status to report later. */
1621 if (debug_linux_nat)
1622 fprintf_unfiltered (gdb_stdlog,
1623 "LNA: waitpid %ld, saving status %s\n",
1624 (long) GET_PID (lp->ptid), status_to_str (status));
1626 lp->status = status;
1628 if (target_can_async_p ())
1629 target_async (inferior_event_handler, 0);
1632 /* Get pending status of LP. */
1634 get_pending_status (struct lwp_info *lp, int *status)
/* Compute, into *STATUS, the wait status (if any) that should be
   redelivered to LP on detach/resume.  SIGNO is the candidate signal,
   TARGET_SIGNAL_0 meaning "nothing pending".  */
1636 enum target_signal signo = TARGET_SIGNAL_0;
1638 /* If we paused threads momentarily, we may have stored pending
1639 events in lp->status or lp->waitstatus (see stop_wait_callback),
1640 and GDB core hasn't seen any signal for those threads.
1641 Otherwise, the last signal reported to the core is found in the
1642 thread object's stop_signal.
1644 There's a corner case that isn't handled here at present. Only
1645 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1646 stop_signal make sense as a real signal to pass to the inferior.
1647 Some catchpoint related events, like
1648 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1649 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1650 those traps are debug API (ptrace in our case) related and
1651 induced; the inferior wouldn't see them if it wasn't being
1652 traced. Hence, we should never pass them to the inferior, even
1653 when set to pass state. Since this corner case isn't handled by
1654 infrun.c when proceeding with a signal, for consistency, neither
1655 do we handle it here (or elsewhere in the file we check for
1656 signal pass state). Normally SIGTRAP isn't set to pass state, so
1657 this is really a corner case. */
1659 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1660 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1661 else if (lp->status)
1662 signo = target_signal_from_host (WSTOPSIG (lp->status));
1663 else if (non_stop && !is_executing (lp->ptid))
/* In non-stop mode each stopped thread carries its own last stop
   signal in the thread object.  */
1665 struct thread_info *tp = find_thread_ptid (lp->ptid);
1667 signo = tp->suspend.stop_signal;
/* All-stop mode: only the thread that last reported an event has a
   meaningful stop_signal, so check against the last target status.  */
1671 struct target_waitstatus last;
1674 get_last_target_status (&last_ptid, &last);
1676 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1678 struct thread_info *tp = find_thread_ptid (lp->ptid);
1680 signo = tp->suspend.stop_signal;
1686 if (signo == TARGET_SIGNAL_0)
1688 if (debug_linux_nat)
1689 fprintf_unfiltered (gdb_stdlog,
1690 "GPT: lwp %s has no pending signal\n",
1691 target_pid_to_str (lp->ptid));
1693 else if (!signal_pass_state (signo))
1695 if (debug_linux_nat)
1696 fprintf_unfiltered (gdb_stdlog,
1697 "GPT: lwp %s had signal %s, "
1698 "but it is in no pass state\n",
1699 target_pid_to_str (lp->ptid),
1700 target_signal_to_string (signo));
/* Pack the pass-state signal back into a host wait status.  */
1704 *status = W_STOPCODE (target_signal_to_host (signo));
1706 if (debug_linux_nat)
1707 fprintf_unfiltered (gdb_stdlog,
1708 "GPT: lwp %s has pending signal %s\n",
1709 target_pid_to_str (lp->ptid),
1710 target_signal_to_string (signo));
1717 detach_callback (struct lwp_info *lp, void *data)
/* iterate_over_lwps callback: detach from a single non-main LWP,
   redelivering any pending pass-state signal via PTRACE_DETACH.  */
1719 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1721 if (debug_linux_nat && lp->status)
1722 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1723 strsignal (WSTOPSIG (lp->status)),
1724 target_pid_to_str (lp->ptid));
1726 /* If there is a pending SIGSTOP, get rid of it. */
1729 if (debug_linux_nat)
1730 fprintf_unfiltered (gdb_stdlog,
1731 "DC: Sending SIGCONT to %s\n",
1732 target_pid_to_str (lp->ptid));
/* SIGCONT cancels the queued SIGSTOP so the LWP doesn't stop again
   right after we detach.  */
1734 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1738 /* We don't actually detach from the LWP that has an id equal to the
1739 overall process id just yet. */
1740 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1744 /* Pass on any pending signal for this LWP. */
1745 get_pending_status (lp, &status);
1747 if (linux_nat_prepare_to_resume != NULL)
1748 linux_nat_prepare_to_resume (lp);
1750 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1751 WSTOPSIG (status)) < 0)
1752 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1753 safe_strerror (errno));
1755 if (debug_linux_nat)
1756 fprintf_unfiltered (gdb_stdlog,
1757 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1758 target_pid_to_str (lp->ptid),
1759 strsignal (WSTOPSIG (status)));
1761 delete_lwp (lp->ptid);
1768 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
/* Detach from the whole inferior: stop every LWP, detach the non-main
   ones via detach_callback, then detach the main LWP through the
   lower (inf-ptrace) layer, forwarding any pending signal in ARGS.  */
1772 struct lwp_info *main_lwp;
1774 pid = GET_PID (inferior_ptid);
1776 if (target_can_async_p ())
1777 linux_nat_async (NULL, 0);
1779 /* Stop all threads before detaching. ptrace requires that the
1780 thread is stopped to sucessfully detach. */
1781 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1782 /* ... and wait until all of them have reported back that
1783 they're no longer running. */
1784 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1786 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1788 /* Only the initial process should be left right now. */
1789 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1791 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1793 /* Pass on any pending signal for the last LWP. */
1794 if ((args == NULL || *args == '\0')
1795 && get_pending_status (main_lwp, &status) != -1
1796 && WIFSTOPPED (status))
1798 /* Put the signal number in ARGS so that inf_ptrace_detach will
1799 pass it along with PTRACE_DETACH. */
/* NOTE(review): ARGS must point at writable storage here; the
   allocation (likely alloca) is on a line missing from this extract.  */
1801 sprintf (args, "%d", (int) WSTOPSIG (status));
1802 if (debug_linux_nat)
1803 fprintf_unfiltered (gdb_stdlog,
1804 "LND: Sending signal %s to %s\n",
1806 target_pid_to_str (main_lwp->ptid));
1809 if (linux_nat_prepare_to_resume != NULL)
1810 linux_nat_prepare_to_resume (main_lwp);
1811 delete_lwp (main_lwp->ptid);
1813 if (forks_exist_p ())
1815 /* Multi-fork case. The current inferior_ptid is being detached
1816 from, but there are other viable forks to debug. Detach from
1817 the current fork, and context-switch to the first
1819 linux_fork_detach (args, from_tty);
1821 if (non_stop && target_can_async_p ())
1822 target_async (inferior_event_handler, 0);
1825 linux_ops->to_detach (ops, args, from_tty);
1831 resume_lwp (struct lwp_info *lp, int step)
/* Resume sibling LWP LP (STEP nonzero => single-step), but only when
   it is safe: not a vfork parent, and with no pending event to report.
   Control-flow guards (braces/else) are partly elided in this extract.  */
1835 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1837 if (inf->vfork_child != NULL)
1839 if (debug_linux_nat)
1840 fprintf_unfiltered (gdb_stdlog,
1841 "RC: Not resuming %s (vfork parent)\n",
1842 target_pid_to_str (lp->ptid));
1844 else if (lp->status == 0
1845 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1847 if (debug_linux_nat)
1848 fprintf_unfiltered (gdb_stdlog,
1849 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1850 target_pid_to_str (lp->ptid));
1852 if (linux_nat_prepare_to_resume != NULL)
1853 linux_nat_prepare_to_resume (lp);
1854 linux_ops->to_resume (linux_ops,
1855 pid_to_ptid (GET_LWP (lp->ptid)),
1856 step, TARGET_SIGNAL_0);
/* Resuming invalidates the cached per-stop state.  */
1859 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1860 lp->stopped_by_watchpoint = 0;
1864 if (debug_linux_nat)
1865 fprintf_unfiltered (gdb_stdlog,
1866 "RC: Not resuming sibling %s (has pending)\n",
1867 target_pid_to_str (lp->ptid));
1872 if (debug_linux_nat)
1873 fprintf_unfiltered (gdb_stdlog,
1874 "RC: Not resuming sibling %s (not stopped)\n",
1875 target_pid_to_str (lp->ptid));
/* iterate_over_lwps callback wrapper; body missing from this extract
   (presumably delegates to resume_lwp) — verify in the full source.  */
1880 resume_callback (struct lwp_info *lp, void *data)
1887 resume_clear_callback (struct lwp_info *lp, void *data)
/* iterate_over_lwps callback: mark LP as not resumed (intent: stop).  */
1890 lp->last_resume_kind = resume_stop;
1895 resume_set_callback (struct lwp_info *lp, void *data)
/* iterate_over_lwps callback: mark LP as resumed (intent: continue).  */
1898 lp->last_resume_kind = resume_continue;
1903 linux_nat_resume (struct target_ops *ops,
1904 ptid_t ptid, int step, enum target_signal signo)
/* Target resume entry point.  Resumes the event thread (and, when PTID
   is a wildcard, its siblings), delivering SIGNO to the event thread.
   Short-circuits entirely if the event thread already has a pending
   status to report.  */
1907 struct lwp_info *lp;
1910 if (debug_linux_nat)
1911 fprintf_unfiltered (gdb_stdlog,
1912 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1913 step ? "step" : "resume",
1914 target_pid_to_str (ptid),
1915 (signo != TARGET_SIGNAL_0
1916 ? strsignal (target_signal_to_host (signo)) : "0"),
1917 target_pid_to_str (inferior_ptid));
1919 block_child_signals (&prev_mask);
1921 /* A specific PTID means `step only this process id'. */
1922 resume_many = (ptid_equal (minus_one_ptid, ptid)
1923 || ptid_is_pid (ptid));
1925 /* Mark the lwps we're resuming as resumed. */
1926 iterate_over_lwps (ptid, resume_set_callback, NULL);
1928 /* See if it's the current inferior that should be handled
1931 lp = find_lwp_pid (inferior_ptid);
1933 lp = find_lwp_pid (ptid);
1934 gdb_assert (lp != NULL);
1936 /* Remember if we're stepping. */
1938 lp->last_resume_kind = step ? resume_step : resume_continue;
1940 /* If we have a pending wait status for this thread, there is no
1941 point in resuming the process. But first make sure that
1942 linux_nat_wait won't preemptively handle the event - we
1943 should never take this short-circuit if we are going to
1944 leave LP running, since we have skipped resuming all the
1945 other threads. This bit of code needs to be synchronized
1946 with linux_nat_wait. */
1948 if (lp->status && WIFSTOPPED (lp->status))
/* The pending signal is in pass state: consume the cached status and
   redeliver that signal instead of short-circuiting.  */
1951 && WSTOPSIG (lp->status)
1952 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1954 if (debug_linux_nat)
1955 fprintf_unfiltered (gdb_stdlog,
1956 "LLR: Not short circuiting for ignored "
1957 "status 0x%x\n", lp->status);
1959 /* FIXME: What should we do if we are supposed to continue
1960 this thread with a signal? */
1961 gdb_assert (signo == TARGET_SIGNAL_0);
1962 signo = target_signal_from_host (WSTOPSIG (lp->status));
1967 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1969 /* FIXME: What should we do if we are supposed to continue
1970 this thread with a signal? */
1971 gdb_assert (signo == TARGET_SIGNAL_0);
1973 if (debug_linux_nat)
1974 fprintf_unfiltered (gdb_stdlog,
1975 "LLR: Short circuiting for status 0x%x\n",
1978 restore_child_signals_mask (&prev_mask);
1979 if (target_can_async_p ())
1981 target_async (inferior_event_handler, 0);
1982 /* Tell the event loop we have something to process. */
1988 /* Mark LWP as not stopped to prevent it from being continued by
1993 iterate_over_lwps (ptid, resume_callback, NULL);
1995 /* Convert to something the lower layer understands. */
1996 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1998 if (linux_nat_prepare_to_resume != NULL)
1999 linux_nat_prepare_to_resume (lp);
2000 linux_ops->to_resume (linux_ops, ptid, step, signo);
/* Resuming invalidates the cached per-stop state.  */
2001 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2002 lp->stopped_by_watchpoint = 0;
2004 if (debug_linux_nat)
2005 fprintf_unfiltered (gdb_stdlog,
2006 "LLR: %s %s, %s (resume event thread)\n",
2007 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2008 target_pid_to_str (ptid),
2009 (signo != TARGET_SIGNAL_0
2010 ? strsignal (target_signal_to_host (signo)) : "0"));
2012 restore_child_signals_mask (&prev_mask);
2013 if (target_can_async_p ())
2014 target_async (inferior_event_handler, 0);
2017 /* Send a signal to an LWP. */
2020 kill_lwp (int lwpid, int signo)
2022 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2023 fails, then we are not using nptl threads and we should be using kill. */
2025 #ifdef HAVE_TKILL_SYSCALL
2027 static int tkill_failed;
/* tkill delivers to a specific kernel thread (LWP), which plain
   kill() cannot do for NPTL threads.  ENOSYS means the kernel lacks
   tkill, in which case we fall through to kill().  */
2034 ret = syscall (__NR_tkill, lwpid, signo);
2035 if (errno != ENOSYS)
2042 return kill (lwpid, signo);
2045 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2046 event, check if the core is interested in it: if not, ignore the
2047 event, and keep waiting; otherwise, we need to toggle the LWP's
2048 syscall entry/exit status, since the ptrace event itself doesn't
2049 indicate it, and report the trap to higher layers. */
2052 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2054 struct target_waitstatus *ourstatus = &lp->waitstatus;
2055 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2056 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
/* When STOPPING (guard line missing from extract), discard the event
   and PTRACE_CONT so stop_wait_callback can collect the SIGSTOP.  */
2060 /* If we're stopping threads, there's a SIGSTOP pending, which
2061 makes it so that the LWP reports an immediate syscall return,
2062 followed by the SIGSTOP. Skip seeing that "return" using
2063 PTRACE_CONT directly, and let stop_wait_callback collect the
2064 SIGSTOP. Later when the thread is resumed, a new syscall
2065 entry event. If we didn't do this (and returned 0), we'd
2066 leave a syscall entry pending, and our caller, by using
2067 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2068 itself. Later, when the user re-resumes this LWP, we'd see
2069 another syscall entry event and we'd mistake it for a return.
2071 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2072 (leaving immediately with LWP->signalled set, without issuing
2073 a PTRACE_CONT), it would still be problematic to leave this
2074 syscall enter pending, as later when the thread is resumed,
2075 it would then see the same syscall exit mentioned above,
2076 followed by the delayed SIGSTOP, while the syscall didn't
2077 actually get to execute. It seems it would be even more
2078 confusing to the user. */
2080 if (debug_linux_nat)
2081 fprintf_unfiltered (gdb_stdlog,
2082 "LHST: ignoring syscall %d "
2083 "for LWP %ld (stopping threads), "
2084 "resuming with PTRACE_CONT for SIGSTOP\n",
2086 GET_LWP (lp->ptid));
2088 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2089 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2093 if (catch_syscall_enabled ())
2095 /* Always update the entry/return state, even if this particular
2096 syscall isn't interesting to the core now. In async mode,
2097 the user could install a new catchpoint for this syscall
2098 between syscall enter/return, and we'll need to know to
2099 report a syscall return if that happens. */
2100 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2101 ? TARGET_WAITKIND_SYSCALL_RETURN
2102 : TARGET_WAITKIND_SYSCALL_ENTRY);
2104 if (catching_syscall_number (syscall_number))
2106 /* Alright, an event to report. */
2107 ourstatus->kind = lp->syscall_state;
2108 ourstatus->value.syscall_number = syscall_number;
2110 if (debug_linux_nat)
2111 fprintf_unfiltered (gdb_stdlog,
2112 "LHST: stopping for %s of syscall %d"
2115 == TARGET_WAITKIND_SYSCALL_ENTRY
2116 ? "entry" : "return",
2118 GET_LWP (lp->ptid));
2122 if (debug_linux_nat)
2123 fprintf_unfiltered (gdb_stdlog,
2124 "LHST: ignoring %s of syscall %d "
2126 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2127 ? "entry" : "return",
2129 GET_LWP (lp->ptid));
2133 /* If we had been syscall tracing, and hence used PT_SYSCALL
2134 before on this LWP, it could happen that the user removes all
2135 syscall catchpoints before we get to process this event.
2136 There are two noteworthy issues here:
2138 - When stopped at a syscall entry event, resuming with
2139 PT_STEP still resumes executing the syscall and reports a
2142 - Only PT_SYSCALL catches syscall enters. If we last
2143 single-stepped this thread, then this event can't be a
2144 syscall enter. If we last single-stepped this thread, this
2145 has to be a syscall exit.
2147 The points above mean that the next resume, be it PT_STEP or
2148 PT_CONTINUE, can not trigger a syscall trace event. */
2149 if (debug_linux_nat)
2150 fprintf_unfiltered (gdb_stdlog,
2151 "LHST: caught syscall event "
2152 "with no syscall catchpoints."
2153 " %d for LWP %ld, ignoring\n",
2155 GET_LWP (lp->ptid));
2156 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2159 /* The core isn't interested in this event. For efficiency, avoid
2160 stopping all threads only to have the core resume them all again.
2161 Since we're not stopping threads, if we're still syscall tracing
2162 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2163 subsequent syscall. Simply resume using the inf-ptrace layer,
2164 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2166 /* Note that gdbarch_get_syscall_number may access registers, hence
2168 registers_changed ();
2169 if (linux_nat_prepare_to_resume != NULL)
2170 linux_nat_prepare_to_resume (lp);
2171 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2172 lp->step, TARGET_SIGNAL_0);
2176 /* Handle a GNU/Linux extended wait response. If we see a clone
2177 event, we need to add the new LWP to our list (and not report the
2178 trap to higher layers). This function returns non-zero if the
2179 event should be ignored and we should wait again. If STOPPING is
2180 true, the new LWP remains stopped, otherwise it is continued. */
2183 linux_handle_extended_wait (struct lwp_info *lp, int status,
2186 int pid = GET_LWP (lp->ptid);
2187 struct target_waitstatus *ourstatus = &lp->waitstatus;
2188 int event = status >> 16;
2190 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2191 || event == PTRACE_EVENT_CLONE)
2193 unsigned long new_pid;
2196 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2198 /* If we haven't already seen the new PID stop, wait for it now. */
2199 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2201 /* The new child has a pending SIGSTOP. We can't affect it until it
2202 hits the SIGSTOP, but we're already attached. */
2203 ret = my_waitpid (new_pid, &status,
2204 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2206 perror_with_name (_("waiting for new child"));
2207 else if (ret != new_pid)
2208 internal_error (__FILE__, __LINE__,
2209 _("wait returned unexpected PID %d"), ret);
2210 else if (!WIFSTOPPED (status))
2211 internal_error (__FILE__, __LINE__,
2212 _("wait returned unexpected status 0x%x"), status);
2215 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2217 if (event == PTRACE_EVENT_FORK
2218 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2220 /* Handle checkpointing by linux-fork.c here as a special
2221 case. We don't want the follow-fork-mode or 'catch fork'
2222 to interfere with this. */
2224 /* This won't actually modify the breakpoint list, but will
2225 physically remove the breakpoints from the child. */
2226 detach_breakpoints (new_pid);
2228 /* Retain child fork in ptrace (stopped) state. */
2229 if (!find_fork_pid (new_pid))
2232 /* Report as spurious, so that infrun doesn't want to follow
2233 this fork. We're actually doing an infcall in
2235 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2236 linux_enable_event_reporting (pid_to_ptid (new_pid));
2238 /* Report the stop to the core. */
2242 if (event == PTRACE_EVENT_FORK)
2243 ourstatus->kind = TARGET_WAITKIND_FORKED;
2244 else if (event == PTRACE_EVENT_VFORK)
2245 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2248 struct lwp_info *new_lp;
2250 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2252 if (debug_linux_nat)
2253 fprintf_unfiltered (gdb_stdlog,
2254 "LHEW: Got clone event "
2255 "from LWP %d, new child is LWP %ld\n",
2258 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2260 new_lp->stopped = 1;
2262 if (WSTOPSIG (status) != SIGSTOP)
2264 /* This can happen if someone starts sending signals to
2265 the new thread before it gets a chance to run, which
2266 have a lower number than SIGSTOP (e.g. SIGUSR1).
2267 This is an unlikely case, and harder to handle for
2268 fork / vfork than for clone, so we do not try - but
2269 we handle it for clone events here. We'll send
2270 the other signal on to the thread below. */
2272 new_lp->signalled = 1;
2276 struct thread_info *tp;
2278 /* When we stop for an event in some other thread, and
2279 pull the thread list just as this thread has cloned,
2280 we'll have seen the new thread in the thread_db list
2281 before handling the CLONE event (glibc's
2282 pthread_create adds the new thread to the thread list
2283 before clone'ing, and has the kernel fill in the
2284 thread's tid on the clone call with
2285 CLONE_PARENT_SETTID). If that happened, and the core
2286 had requested the new thread to stop, we'll have
2287 killed it with SIGSTOP. But since SIGSTOP is not an
2288 RT signal, it can only be queued once. We need to be
2289 careful to not resume the LWP if we wanted it to
2290 stop. In that case, we'll leave the SIGSTOP pending.
2291 It will later be reported as TARGET_SIGNAL_0. */
2292 tp = find_thread_ptid (new_lp->ptid);
2293 if (tp != NULL && tp->stop_requested)
2294 new_lp->last_resume_kind = resume_stop;
2301 /* Add the new thread to GDB's lists as soon as possible
2304 1) the frontend doesn't have to wait for a stop to
2307 2) we tag it with the correct running state. */
2309 /* If the thread_db layer is active, let it know about
2310 this new thread, and add it to GDB's list. */
2311 if (!thread_db_attach_lwp (new_lp->ptid))
2313 /* We're not using thread_db. Add it to GDB's
2315 target_post_attach (GET_LWP (new_lp->ptid));
2316 add_thread (new_lp->ptid);
2321 set_running (new_lp->ptid, 1);
2322 set_executing (new_lp->ptid, 1);
2323 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2325 new_lp->last_resume_kind = resume_continue;
2331 /* We created NEW_LP so it cannot yet contain STATUS. */
2332 gdb_assert (new_lp->status == 0);
2334 /* Save the wait status to report later. */
2335 if (debug_linux_nat)
2336 fprintf_unfiltered (gdb_stdlog,
2337 "LHEW: waitpid of new LWP %ld, "
2338 "saving status %s\n",
2339 (long) GET_LWP (new_lp->ptid),
2340 status_to_str (status));
2341 new_lp->status = status;
2344 /* Note the need to use the low target ops to resume, to
2345 handle resuming with PT_SYSCALL if we have syscall
2349 new_lp->resumed = 1;
2353 gdb_assert (new_lp->last_resume_kind == resume_continue);
2354 if (debug_linux_nat)
2355 fprintf_unfiltered (gdb_stdlog,
2356 "LHEW: resuming new LWP %ld\n",
2357 GET_LWP (new_lp->ptid));
2358 if (linux_nat_prepare_to_resume != NULL)
2359 linux_nat_prepare_to_resume (new_lp);
2360 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2361 0, TARGET_SIGNAL_0);
2362 new_lp->stopped = 0;
2366 if (debug_linux_nat)
2367 fprintf_unfiltered (gdb_stdlog,
2368 "LHEW: resuming parent LWP %d\n", pid);
2369 if (linux_nat_prepare_to_resume != NULL)
2370 linux_nat_prepare_to_resume (lp);
2371 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2372 0, TARGET_SIGNAL_0);
2380 if (event == PTRACE_EVENT_EXEC)
2382 if (debug_linux_nat)
2383 fprintf_unfiltered (gdb_stdlog,
2384 "LHEW: Got exec event from LWP %ld\n",
2385 GET_LWP (lp->ptid));
2387 ourstatus->kind = TARGET_WAITKIND_EXECD;
2388 ourstatus->value.execd_pathname
2389 = xstrdup (linux_child_pid_to_exec_file (pid));
2394 if (event == PTRACE_EVENT_VFORK_DONE)
2396 if (current_inferior ()->waiting_for_vfork_done)
2398 if (debug_linux_nat)
2399 fprintf_unfiltered (gdb_stdlog,
2400 "LHEW: Got expected PTRACE_EVENT_"
2401 "VFORK_DONE from LWP %ld: stopping\n",
2402 GET_LWP (lp->ptid));
2404 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2408 if (debug_linux_nat)
2409 fprintf_unfiltered (gdb_stdlog,
2410 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2411 "from LWP %ld: resuming\n",
2412 GET_LWP (lp->ptid));
2413 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2417 internal_error (__FILE__, __LINE__,
2418 _("unknown ptrace event %d"), event);
2421 /* Return non-zero if LWP is a zombie. */
/* Reads /proc/LWP/status and checks whether its "State:" line reads
   "Z (zombie)".  Returns 0 (with a warning) if the proc file cannot
   be opened.  BUFFER is reused: first for the path, then for each
   line read back.  */
2424 linux_lwp_is_zombie (long lwp)
2426 char buffer[MAXPATHLEN];
2431 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
2432 procfile = fopen (buffer, "r");
2433 if (procfile == NULL)
2435 warning (_("unable to open /proc file '%s'"), buffer);
/* Scan for the "State:" line; stop on the first match so BUFFER
   still holds it for the comparison below.  */
2440 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2441 if (strncmp (buffer, "State:", 6) == 0)
2446 retval = (have_state
2447 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
2452 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
/* On entry LP must be running (not stopped) with no status pending;
   both are asserted below.  SIGCHLD is blocked around the poll loop
   so sigsuspend cannot race with signal delivery.  */
2456 wait_lwp (struct lwp_info *lp)
2460 int thread_dead = 0;
2463 gdb_assert (!lp->stopped);
2464 gdb_assert (lp->status == 0);
2466 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2467 block_child_signals (&prev_mask);
2471 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2472 was right and we should just call sigsuspend. */
/* Try both waitpid flavors: plain first, then __WCLONE for
   clone-created LWPs on older kernels.  */
2474 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2475 if (pid == -1 && errno == ECHILD)
2476 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2477 if (pid == -1 && errno == ECHILD)
2479 /* The thread has previously exited. We need to delete it
2480 now because, for some vendor 2.4 kernels with NPTL
2481 support backported, there won't be an exit event unless
2482 it is the main thread. 2.6 kernels will report an exit
2483 event for each thread that exits, as expected. */
2485 if (debug_linux_nat)
2486 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2487 target_pid_to_str (lp->ptid));
2492 /* Bugs 10970, 12702.
2493 Thread group leader may have exited in which case we'll lock up in
2494 waitpid if there are other threads, even if they are all zombies too.
2495 Basically, we're not supposed to use waitpid this way.
2496 __WCLONE is not applicable for the leader so we can't use that.
2497 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2498 process; it gets ESRCH both for the zombie and for running processes.
2500 As a workaround, check if we're waiting for the thread group leader and
2501 if it's a zombie, and avoid calling waitpid if it is.
2503 This is racy, what if the tgl becomes a zombie right after we check?
2504 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2505 waiting waitpid but the linux_lwp_is_zombie is safe this way. */
/* PID == LWP means this is the thread group leader.  */
2507 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2508 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
2511 if (debug_linux_nat)
2512 fprintf_unfiltered (gdb_stdlog,
2513 "WL: Thread group leader %s vanished.\n",
2514 target_pid_to_str (lp->ptid));
2518 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2519 get invoked despite our caller had them intentionally blocked by
2520 block_child_signals. This is sensitive only to the loop of
2521 linux_nat_wait_1 and there if we get called my_waitpid gets called
2522 again before it gets to sigsuspend so we can safely let the handlers
2523 get executed here. */
2525 sigsuspend (&suspend_mask);
2528 restore_child_signals_mask (&prev_mask);
2532 gdb_assert (pid == GET_LWP (lp->ptid));
2534 if (debug_linux_nat)
2536 fprintf_unfiltered (gdb_stdlog,
2537 "WL: waitpid %s received %s\n",
2538 target_pid_to_str (lp->ptid),
2539 status_to_str (status));
2542 /* Check if the thread has exited. */
2543 if (WIFEXITED (status) || WIFSIGNALED (status))
2546 if (debug_linux_nat)
2547 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2548 target_pid_to_str (lp->ptid));
2558 gdb_assert (WIFSTOPPED (status));
2560 /* Handle GNU/Linux's syscall SIGTRAPs. */
2561 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2563 /* No longer need the sysgood bit. The ptrace event ends up
2564 recorded in lp->waitstatus if we care for it. We can carry
2565 on handling the event like a regular SIGTRAP from here
2567 status = W_STOPCODE (SIGTRAP);
/* If the syscall trap was consumed internally, wait again for the
   next real stop.  */
2568 if (linux_handle_syscall_trap (lp, 1))
2569 return wait_lwp (lp);
2572 /* Handle GNU/Linux's extended waitstatus for trace events. */
/* status >> 16 carries the PTRACE_EVENT_* code when non-zero.  */
2573 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2575 if (debug_linux_nat)
2576 fprintf_unfiltered (gdb_stdlog,
2577 "WL: Handling extended status 0x%06x\n",
2579 if (linux_handle_extended_wait (lp, status, 1))
2580 return wait_lwp (lp);
2586 /* Save the most recent siginfo for LP. This is currently only called
2587 for SIGTRAP; some ports use the si_addr field for
2588 target_stopped_data_address. In the future, it may also be used to
2589 restore the siginfo of requeued signals. */
2592 save_siginfo (struct lwp_info *lp)
/* Cache the kernel's siginfo for LP so later queries (e.g. watchpoint
   address lookups) need not re-issue ptrace.  */
2595 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2596 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
/* Fallback path: zero the cached siginfo so stale data is never
   consulted (the guarding error check is not visible in this
   listing).  */
2599 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2602 /* Send a SIGSTOP to LP. */
/* Iterator callback: signal only LWPs that are neither already
   stopped nor already signalled, so we never queue a second SIGSTOP
   for the same LWP.  */
2605 stop_callback (struct lwp_info *lp, void *data)
2607 if (!lp->stopped && !lp->signalled)
2611 if (debug_linux_nat)
2613 fprintf_unfiltered (gdb_stdlog,
2614 "SC: kill %s **<SIGSTOP>**\n",
2615 target_pid_to_str (lp->ptid));
2618 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2619 if (debug_linux_nat)
2621 fprintf_unfiltered (gdb_stdlog,
2622 "SC: lwp kill %d %s\n",
2624 errno ? safe_strerror (errno) : "ERRNO-OK");
/* We must not already have a pending status when signalling.  */
2628 gdb_assert (lp->status == 0);
2634 /* Request a stop on LWP. */
/* Public wrapper over stop_callback for a single LWP.  */
2637 linux_stop_lwp (struct lwp_info *lwp)
2639 stop_callback (lwp, NULL);
2642 /* Return non-zero if LWP PID has a pending SIGINT. */
/* Queries /proc-derived signal sets; a SIGINT that is pending but
   ignored does not count.  */
2645 linux_nat_has_pending_sigint (int pid)
2647 sigset_t pending, blocked, ignored;
2649 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2651 if (sigismember (&pending, SIGINT)
2652 && !sigismember (&ignored, SIGINT))
2658 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2661 set_ignore_sigint (struct lwp_info *lp, void *data)
2663 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2664 flag to consume the next one. */
/* A stopped LWP with a stored SIGINT stop status has the signal
   pending right now; otherwise arm ignore_sigint for the future.  */
2665 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2666 && WSTOPSIG (lp->status) == SIGINT)
2669 lp->ignore_sigint = 1;
2674 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2675 This function is called after we know the LWP has stopped; if the LWP
2676 stopped before the expected SIGINT was delivered, then it will never have
2677 arrived. Also, if the signal was delivered to a shared queue and consumed
2678 by a different thread, it will never be delivered to this LWP. */
2681 maybe_clear_ignore_sigint (struct lwp_info *lp)
/* Fast path: nothing to clear.  */
2683 if (!lp->ignore_sigint)
2686 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2688 if (debug_linux_nat)
2689 fprintf_unfiltered (gdb_stdlog,
2690 "MCIS: Clearing bogus flag for %s\n",
2691 target_pid_to_str (lp->ptid));
2692 lp->ignore_sigint = 0;
2696 /* Fetch the possible triggered data watchpoint info and store it in
2699 On some archs, like x86, that use debug registers to set
2700 watchpoints, it's possible that the way to know which watched
2701 address trapped, is to check the register that is used to select
2702 which address to watch. Problem is, between setting the watchpoint
2703 and reading back which data address trapped, the user may change
2704 the set of watchpoints, and, as a consequence, GDB changes the
2705 debug registers in the inferior. To avoid reading back a stale
2706 stopped-data-address when that happens, we cache in LP the fact
2707 that a watchpoint trapped, and the corresponding data address, as
2708 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2709 registers meanwhile, we have the cached data we can rely on. */
2712 save_sigtrap (struct lwp_info *lp)
2714 struct cleanup *old_chain;
2716 if (linux_ops->to_stopped_by_watchpoint == NULL)
2718 lp->stopped_by_watchpoint = 0;
2722 old_chain = save_inferior_ptid ();
2723 inferior_ptid = lp->ptid;
2725 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2727 if (lp->stopped_by_watchpoint)
2729 if (linux_ops->to_stopped_data_address != NULL)
2730 lp->stopped_data_address_p =
2731 linux_ops->to_stopped_data_address (¤t_target,
2732 &lp->stopped_data_address);
2734 lp->stopped_data_address_p = 0;
2737 do_cleanups (old_chain);
2740 /* See save_sigtrap. */
/* Target hook: report the cached watchpoint-hit flag for the LWP
   selected by inferior_ptid.  */
2743 linux_nat_stopped_by_watchpoint (void)
2745 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2747 gdb_assert (lp != NULL);
2749 return lp->stopped_by_watchpoint;
/* Target hook: store the cached watched data address in *ADDR_P and
   return whether one was recorded (see save_sigtrap).  */
2753 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2755 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2757 gdb_assert (lp != NULL);
2759 *addr_p = lp->stopped_data_address;
2761 return lp->stopped_data_address_p;
/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */

static int
sigtrap_is_event (int status)
{
  /* Only a ptrace-stop can carry a trap signal at all.  */
  if (!WIFSTOPPED (status))
    return 0;
  return WSTOPSIG (status) == SIGTRAP;
}
2772 /* SIGTRAP-like events recognizer. */
/* Overridable hook; defaults to the plain SIGTRAP check.  Targets may
   install their own recognizer via linux_nat_set_status_is_event.  */
2774 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2776 /* Check for SIGTRAP-like events in LP. */
2779 linux_nat_lp_status_is_event (struct lwp_info *lp)
2781 /* We check for lp->waitstatus in addition to lp->status, because we can
2782 have pending process exits recorded in lp->status
2783 and W_EXITCODE(0,0) == 0. We should probably have an additional
2784 lp->status_p flag. */
/* Only a plain (non-extended) pending status is considered; an
   extended event recorded in lp->waitstatus takes precedence.  */
2786 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2787 && linux_nat_status_is_event (lp->status));
2790 /* Set alternative SIGTRAP-like events recognizer. If
2791 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
/* Installer for the linux_nat_status_is_event hook above; T is the
   target registering its recognizer.  */
2795 linux_nat_set_status_is_event (struct target_ops *t,
2796 int (*status_is_event) (int status))
2798 linux_nat_status_is_event = status_is_event;
2801 /* Wait until LP is stopped. */
/* Iterator callback: drive LP to a stop, consuming and re-queueing
   any non-SIGSTOP signals encountered along the way.  Recurses until
   the expected SIGSTOP is collected.  */
2804 stop_wait_callback (struct lwp_info *lp, void *data)
2806 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2808 /* If this is a vfork parent, bail out, it is not going to report
2809 any SIGSTOP until the vfork is done with. */
2810 if (inf->vfork_child != NULL)
2817 status = wait_lwp (lp);
/* A SIGINT we were told to ignore: discard it and keep waiting.  */
2821 if (lp->ignore_sigint && WIFSTOPPED (status)
2822 && WSTOPSIG (status) == SIGINT)
2824 lp->ignore_sigint = 0;
2827 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2828 if (debug_linux_nat)
2829 fprintf_unfiltered (gdb_stdlog,
2830 "PTRACE_CONT %s, 0, 0 (%s) "
2831 "(discarding SIGINT)\n",
2832 target_pid_to_str (lp->ptid),
2833 errno ? safe_strerror (errno) : "OK");
2835 return stop_wait_callback (lp, NULL);
2838 maybe_clear_ignore_sigint (lp);
2840 if (WSTOPSIG (status) != SIGSTOP)
2842 if (linux_nat_status_is_event (status))
2844 /* If a LWP other than the LWP that we're reporting an
2845 event for has hit a GDB breakpoint (as opposed to
2846 some random trap signal), then just arrange for it to
2847 hit it again later. We don't keep the SIGTRAP status
2848 and don't forward the SIGTRAP signal to the LWP. We
2849 will handle the current event, eventually we will
2850 resume all LWPs, and this one will get its breakpoint
2853 If we do not do this, then we run the risk that the
2854 user will delete or disable the breakpoint, but the
2855 thread will have already tripped on it. */
2857 /* Save the trap's siginfo in case we need it later. */
2862 /* Now resume this LWP and get the SIGSTOP event. */
2864 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2865 if (debug_linux_nat)
2867 fprintf_unfiltered (gdb_stdlog,
2868 "PTRACE_CONT %s, 0, 0 (%s)\n",
2869 target_pid_to_str (lp->ptid),
2870 errno ? safe_strerror (errno) : "OK");
2872 fprintf_unfiltered (gdb_stdlog,
2873 "SWC: Candidate SIGTRAP event in %s\n",
2874 target_pid_to_str (lp->ptid));
2876 /* Hold this event/waitstatus while we check to see if
2877 there are any more (we still want to get that SIGSTOP). */
2878 stop_wait_callback (lp, NULL);
2880 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2881 there's another event, throw it back into the
2885 if (debug_linux_nat)
2886 fprintf_unfiltered (gdb_stdlog,
2887 "SWC: kill %s, %s\n",
2888 target_pid_to_str (lp->ptid),
2889 status_to_str ((int) status));
/* Re-queue the signal already stored in lp->status so it is
   redelivered by the kernel later.  */
2890 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2893 /* Save the sigtrap event. */
2894 lp->status = status;
2899 /* The thread was stopped with a signal other than
2900 SIGSTOP, and didn't accidentally trip a breakpoint. */
2902 if (debug_linux_nat)
2904 fprintf_unfiltered (gdb_stdlog,
2905 "SWC: Pending event %s in %s\n",
2906 status_to_str ((int) status),
2907 target_pid_to_str (lp->ptid));
2909 /* Now resume this LWP and get the SIGSTOP event. */
2911 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2912 if (debug_linux_nat)
2913 fprintf_unfiltered (gdb_stdlog,
2914 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2915 target_pid_to_str (lp->ptid),
2916 errno ? safe_strerror (errno) : "OK");
2918 /* Hold this event/waitstatus while we check to see if
2919 there are any more (we still want to get that SIGSTOP). */
2920 stop_wait_callback (lp, NULL);
2922 /* If the lp->status field is still empty, use it to
2923 hold this event. If not, then this event must be
2924 returned to the event queue of the LWP. */
2927 if (debug_linux_nat)
2929 fprintf_unfiltered (gdb_stdlog,
2930 "SWC: kill %s, %s\n",
2931 target_pid_to_str (lp->ptid),
2932 status_to_str ((int) status));
2934 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2937 lp->status = status;
2943 /* We caught the SIGSTOP that we intended to catch, so
2944 there's no SIGSTOP pending. */
2953 /* Return non-zero if LP has a wait status pending. */
2956 status_callback (struct lwp_info *lp, void *data)
2958 /* Only report a pending wait status if we pretend that this has
2959 indeed been resumed. */
/* Extended events live in lp->waitstatus; plain wait statuses in
   lp->status.  Either counts as pending.  */
2963 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2965 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2966 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2967 0', so a clean process exit can not be stored pending in
2968 lp->status, it is indistinguishable from
2969 no-pending-status. */
2973 if (lp->status != 0)
2979 /* Return non-zero if LP isn't stopped. */
/* An LWP also counts as "running" when it is stopped but holds a
   pending event that still needs reporting.  */
2982 running_callback (struct lwp_info *lp, void *data)
2984 return (!lp->stopped
2985 || ((lp->status != 0
2986 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2990 /* Count the LWP's that have had events. */
/* DATA points at the running count (int *); incremented for each
   resumed LWP with a pending SIGTRAP-like event.  */
2993 count_events_callback (struct lwp_info *lp, void *data)
2997 gdb_assert (count != NULL);
2999 /* Count only resumed LWPs that have a SIGTRAP event pending. */
3000 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3006 /* Select the LWP (if any) that is currently being single-stepped. */
3009 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
/* Matches only LWPs whose last resume request was a step.  */
3011 if (lp->last_resume_kind == resume_step
3018 /* Select the Nth LWP that has had a SIGTRAP event. */
/* DATA is the countdown selector (int *); returns non-zero when the
   Nth matching LWP is reached.  */
3021 select_event_lwp_callback (struct lwp_info *lp, void *data)
3023 int *selector = data;
3025 gdb_assert (selector != NULL);
3027 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3028 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3029 if ((*selector)-- == 0)
/* If LP stopped on a GDB breakpoint, undo the stop so the breakpoint
   will be re-hit when LP is resumed later.  Returns non-zero when a
   breakpoint was indeed cancelled.  */
3036 cancel_breakpoint (struct lwp_info *lp)
3038 /* Arrange for a breakpoint to be hit again later. We don't keep
3039 the SIGTRAP status and don't forward the SIGTRAP signal to the
3040 LWP. We will handle the current event, eventually we will resume
3041 this LWP, and this breakpoint will trap again.
3043 If we do not do this, then we run the risk that the user will
3044 delete or disable the breakpoint, but the LWP will have already
3047 struct regcache *regcache = get_thread_regcache (lp->ptid);
3048 struct gdbarch *gdbarch = get_regcache_arch (regcache);
/* Compute where the breakpoint instruction itself lives: PC minus
   the architecture's post-break PC adjustment.  */
3051 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3052 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3054 if (debug_linux_nat)
3055 fprintf_unfiltered (gdb_stdlog,
3056 "CB: Push back breakpoint for %s\n",
3057 target_pid_to_str (lp->ptid));
3059 /* Back up the PC if necessary. */
3060 if (gdbarch_decr_pc_after_break (gdbarch))
3061 regcache_write_pc (regcache, pc);
/* Iterator callback: cancel breakpoint hits in every LWP except
   EVENT_LP (passed via DATA), whose event is the one being
   reported.  */
3069 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3071 struct lwp_info *event_lp = data;
3073 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3077 /* If a LWP other than the LWP that we're reporting an event for has
3078 hit a GDB breakpoint (as opposed to some random trap signal),
3079 then just arrange for it to hit it again later. We don't keep
3080 the SIGTRAP status and don't forward the SIGTRAP signal to the
3081 LWP. We will handle the current event, eventually we will resume
3082 all LWPs, and this one will get its breakpoint trap again.
3084 If we do not do this, then we run the risk that the user will
3085 delete or disable the breakpoint, but the LWP will have already
3088 if (linux_nat_lp_status_is_event (lp)
3089 && cancel_breakpoint (lp))
3090 /* Throw away the SIGTRAP. */
3096 /* Select one LWP out of those that have events pending. */
/* FILTER restricts which LWPs are considered; *ORIG_LP/*STATUS are
   replaced in place with the chosen LWP and its status.  Preference
   order: the single-stepped LWP if any, else a random LWP with a
   pending SIGTRAP (randomness keeps one busy thread from always
   winning).  */
3099 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3102 int random_selector;
3103 struct lwp_info *event_lp;
3105 /* Record the wait status for the original LWP. */
3106 (*orig_lp)->status = *status;
3108 /* Give preference to any LWP that is being single-stepped. */
3109 event_lp = iterate_over_lwps (filter,
3110 select_singlestep_lwp_callback, NULL);
3111 if (event_lp != NULL)
3113 if (debug_linux_nat)
3114 fprintf_unfiltered (gdb_stdlog,
3115 "SEL: Select single-step %s\n",
3116 target_pid_to_str (event_lp->ptid));
3120 /* No single-stepping LWP. Select one at random, out of those
3121 which have had SIGTRAP events. */
3123 /* First see how many SIGTRAP events we have. */
3124 iterate_over_lwps (filter, count_events_callback, &num_events);
3126 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3127 random_selector = (int)
3128 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3130 if (debug_linux_nat && num_events > 1)
3131 fprintf_unfiltered (gdb_stdlog,
3132 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3133 num_events, random_selector);
3135 event_lp = iterate_over_lwps (filter,
3136 select_event_lwp_callback,
3140 if (event_lp != NULL)
3142 /* Switch the event LWP. */
3143 *orig_lp = event_lp;
3144 *status = event_lp->status;
3147 /* Flush the wait status for the event LWP. */
3148 (*orig_lp)->status = 0;
3151 /* Return non-zero if LP has been resumed. */
/* Iterator predicate (body elided in this listing; presumably tests
   lp->resumed — confirm against the full source).  */
3154 resumed_callback (struct lwp_info *lp, void *data)
3159 /* Stop an active thread, verify it still exists, then resume it. If
3160 the thread ends up with a pending status, then it is not resumed,
3161 and *DATA (really a pointer to int), is set. */
3164 stop_and_resume_callback (struct lwp_info *lp, void *data)
3166 int *new_pending_p = data;
/* Remember the ptid: the LWP may be deleted while we wait, so we
   re-look it up afterwards.  */
3170 ptid_t ptid = lp->ptid;
3172 stop_callback (lp, NULL);
3173 stop_wait_callback (lp, NULL);
3175 /* Resume if the lwp still exists, and the core wanted it
3177 lp = find_lwp_pid (ptid);
3180 if (lp->last_resume_kind == resume_stop
3183 /* The core wanted the LWP to stop. Even if it stopped
3184 cleanly (with SIGSTOP), leave the event pending. */
3185 if (debug_linux_nat)
3186 fprintf_unfiltered (gdb_stdlog,
3187 "SARC: core wanted LWP %ld stopped "
3188 "(leaving SIGSTOP pending)\n",
3189 GET_LWP (lp->ptid));
/* Synthesize the SIGSTOP stop status the core expects to see.  */
3190 lp->status = W_STOPCODE (SIGSTOP);
3193 if (lp->status == 0)
3195 if (debug_linux_nat)
3196 fprintf_unfiltered (gdb_stdlog,
3197 "SARC: re-resuming LWP %ld\n",
3198 GET_LWP (lp->ptid));
3199 resume_lwp (lp, lp->step);
3203 if (debug_linux_nat)
3204 fprintf_unfiltered (gdb_stdlog,
3205 "SARC: not re-resuming LWP %ld "
3207 GET_LWP (lp->ptid));
3216 /* Check if we should go on and pass this event to common code.
3217 Return the affected lwp if we are, or NULL otherwise. If we stop
3218 all lwps temporarily, we may end up with new pending events in some
3219 other lwp. In that case set *NEW_PENDING_P to true. */
3221 static struct lwp_info *
3222 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3224 struct lwp_info *lp;
3228 lp = find_lwp_pid (pid_to_ptid (lwpid));
3230 /* Check for stop events reported by a process we didn't already
3231 know about - anything not already in our LWP list.
3233 If we're expecting to receive stopped processes after
3234 fork, vfork, and clone events, then we'll just add the
3235 new one to our list and go back to waiting for the event
3236 to be reported - the stopped process might be returned
3237 from waitpid before or after the event is.
3239 But note the case of a non-leader thread exec'ing after the
3240 leader having exited, and gone from our lists. The non-leader
3241 thread changes its tid to the tgid. */
3243 if (WIFSTOPPED (status) && lp == NULL
3244 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3246 /* A multi-thread exec after we had seen the leader exiting. */
3247 if (debug_linux_nat)
3248 fprintf_unfiltered (gdb_stdlog,
3249 "LLW: Re-adding thread group leader LWP %d.\n",
3252 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3255 add_thread (lp->ptid);
/* Unknown stopped LWP: park its status until the matching
   fork/vfork/clone event arrives.  */
3258 if (WIFSTOPPED (status) && !lp)
3260 add_to_pid_list (&stopped_pids, lwpid, status);
3264 /* Make sure we don't report an event for the exit of an LWP not in
3265 our list, i.e. not part of the current process. This can happen
3266 if we detach from a program we originally forked and then it
3268 if (!WIFSTOPPED (status) && !lp)
3271 /* Handle GNU/Linux's syscall SIGTRAPs. */
3272 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3274 /* No longer need the sysgood bit. The ptrace event ends up
3275 recorded in lp->waitstatus if we care for it. We can carry
3276 on handling the event like a regular SIGTRAP from here
3278 status = W_STOPCODE (SIGTRAP);
3279 if (linux_handle_syscall_trap (lp, 0))
3283 /* Handle GNU/Linux's extended waitstatus for trace events. */
3284 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3286 if (debug_linux_nat)
3287 fprintf_unfiltered (gdb_stdlog,
3288 "LLW: Handling extended status 0x%06x\n",
3290 if (linux_handle_extended_wait (lp, status, 0))
3294 if (linux_nat_status_is_event (status))
3296 /* Save the trap's siginfo in case we need it later. */
3302 /* Check if the thread has exited. */
3303 if ((WIFEXITED (status) || WIFSIGNALED (status))
3304 && num_lwps (GET_PID (lp->ptid)) > 1)
3306 /* If this is the main thread, we must stop all threads and verify
3307 if they are still alive. This is because in the nptl thread model
3308 on Linux 2.4, there is no signal issued for exiting LWPs
3309 other than the main thread. We only get the main thread exit
3310 signal once all child threads have already exited. If we
3311 stop all the threads and use the stop_wait_callback to check
3312 if they have exited we can determine whether this signal
3313 should be ignored or whether it means the end of the debugged
3314 application, regardless of which threading model is being
3316 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3319 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3320 stop_and_resume_callback, new_pending_p)
3323 if (debug_linux_nat)
3324 fprintf_unfiltered (gdb_stdlog,
3325 "LLW: %s exited.\n",
3326 target_pid_to_str (lp->ptid));
3328 if (num_lwps (GET_PID (lp->ptid)) > 1)
3330 /* If there is at least one more LWP, then the exit signal
3331 was not the end of the debugged application and should be
3338 /* Check if the current LWP has previously exited. In the nptl
3339 thread model, LWPs other than the main thread do not issue
3340 signals when they exit so we must check whenever the thread has
3341 stopped. A similar check is made in stop_wait_callback(). */
3342 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3344 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3346 if (debug_linux_nat)
3347 fprintf_unfiltered (gdb_stdlog,
3348 "LLW: %s exited.\n",
3349 target_pid_to_str (lp->ptid));
3353 /* Make sure there is at least one thread running. */
3354 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3356 /* Discard the event. */
3360 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3361 an attempt to stop an LWP. */
3363 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3365 if (debug_linux_nat)
3366 fprintf_unfiltered (gdb_stdlog,
3367 "LLW: Delayed SIGSTOP caught for %s.\n",
3368 target_pid_to_str (lp->ptid));
/* Unless the core asked for a stop, swallow our own SIGSTOP and
   transparently re-resume the LWP.  */
3372 if (lp->last_resume_kind != resume_stop)
3374 /* This is a delayed SIGSTOP. */
3376 registers_changed ();
3378 if (linux_nat_prepare_to_resume != NULL)
3379 linux_nat_prepare_to_resume (lp);
3380 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3381 lp->step, TARGET_SIGNAL_0);
3382 if (debug_linux_nat)
3383 fprintf_unfiltered (gdb_stdlog,
3384 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3386 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3387 target_pid_to_str (lp->ptid));
3390 gdb_assert (lp->resumed);
3392 /* Discard the event. */
3397 /* Make sure we don't report a SIGINT that we have already displayed
3398 for another thread. */
3399 if (lp->ignore_sigint
3400 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3402 if (debug_linux_nat)
3403 fprintf_unfiltered (gdb_stdlog,
3404 "LLW: Delayed SIGINT caught for %s.\n",
3405 target_pid_to_str (lp->ptid));
3407 /* This is a delayed SIGINT. */
3408 lp->ignore_sigint = 0;
3410 registers_changed ();
3411 if (linux_nat_prepare_to_resume != NULL)
3412 linux_nat_prepare_to_resume (lp);
3413 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3414 lp->step, TARGET_SIGNAL_0);
3415 if (debug_linux_nat)
3416 fprintf_unfiltered (gdb_stdlog,
3417 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3419 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3420 target_pid_to_str (lp->ptid));
3423 gdb_assert (lp->resumed);
3425 /* Discard the event. */
3429 /* An interesting event. */
/* Store it on LP; the caller (linux_nat_wait_1) reports it.  */
3431 lp->status = status;
3435 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3436 their exits until all other threads in the group have exited. */
/* Walks all inferiors; for each whose leader LWP is a zombie while
   other LWPs remain, deletes the leader from our lists.  */
3439 check_zombie_leaders (void)
3441 struct inferior *inf;
3445 struct lwp_info *leader_lp;
3450 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3451 if (leader_lp != NULL
3452 /* Check if there are other threads in the group, as we may
3453 have raced with the inferior simply exiting. */
3454 && num_lwps (inf->pid) > 1
3455 && linux_lwp_is_zombie (inf->pid))
3457 if (debug_linux_nat)
3458 fprintf_unfiltered (gdb_stdlog,
3459 "CZL: Thread group leader %d zombie "
3460 "(it exited, or another thread execd).\n",
3463 /* A leader zombie can mean one of two things:
3465 - It exited, and there's an exit status pending
3466 available, or only the leader exited (not the whole
3467 program). In the latter case, we can't waitpid the
3468 leader's exit status until all other threads are gone.
3470 - There are 3 or more threads in the group, and a thread
3471 other than the leader exec'd. On an exec, the Linux
3472 kernel destroys all other threads (except the execing
3473 one) in the thread group, and resets the execing thread's
3474 tid to the tgid. No exit notification is sent for the
3475 execing thread -- from the ptracer's perspective, it
3476 appears as though the execing thread just vanishes.
3477 Until we reap all other threads except the leader and the
3478 execing thread, the leader will be zombie, and the
3479 execing thread will be in `D (disc sleep)'. As soon as
3480 all other threads are reaped, the execing thread changes
3481 it's tid to the tgid, and the previous (zombie) leader
3482 vanishes, giving place to the "new" leader. We could try
3483 distinguishing the exit and exec cases, by waiting once
3484 more, and seeing if something comes out, but it doesn't
3485 sound useful. The previous leader _does_ go away, and
3486 we'll re-add the new one once we see the exec event
3487 (which is just the same as what would happen if the
3488 previous leader did exit voluntarily before some other
3491 if (debug_linux_nat)
3492 fprintf_unfiltered (gdb_stdlog,
3493 "CZL: Thread group leader %d vanished.\n",
3495 exit_lwp (leader_lp);
/* Core event-wait loop for the Linux native target: collect one event
   from the inferior's LWPs via waitpid, filter/serialize it, and
   translate it into *OURSTATUS for the GDB core.  Returns the ptid of
   the thread the event belongs to.
   NOTE(review): this extract is elided -- braces and some statements
   between the numbered lines are missing -- so the comments below
   describe only the logic that is visible here.  */
3501 linux_nat_wait_1 (struct target_ops *ops,
3502 ptid_t ptid, struct target_waitstatus *ourstatus,
3505 static sigset_t prev_mask;
3506 enum resume_kind last_resume_kind;
3507 struct lwp_info *lp;
3510 if (debug_linux_nat)
3511 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3513 /* The first time we get here after starting a new inferior, we may
3514 not have added it to the LWP list yet - this is the earliest
3515 moment at which we know its PID. */
3516 if (ptid_is_pid (inferior_ptid))
3518 /* Upgrade the main thread's ptid. */
3519 thread_change_ptid (inferior_ptid,
3520 BUILD_LWP (GET_PID (inferior_ptid),
3521 GET_PID (inferior_ptid)));
3523 lp = add_lwp (inferior_ptid);
/* SIGCHLD is blocked for the whole wait; it is unblocked only inside
   the sigsuspend further down, so no child-exit notification is lost
   between the waitpid poll and the sleep.  */
3527 /* Make sure SIGCHLD is blocked. */
3528 block_child_signals (&prev_mask);
3534 /* First check if there is a LWP with a wait status pending. */
3535 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3537 /* Any LWP in the PTID group that's been resumed will do. */
3538 lp = iterate_over_lwps (ptid, status_callback, NULL);
3541 if (debug_linux_nat && lp->status)
3542 fprintf_unfiltered (gdb_stdlog,
3543 "LLW: Using pending wait status %s for %s.\n",
3544 status_to_str (lp->status),
3545 target_pid_to_str (lp->ptid));
3548 else if (is_lwp (ptid))
3550 if (debug_linux_nat)
3551 fprintf_unfiltered (gdb_stdlog,
3552 "LLW: Waiting for specific LWP %s.\n",
3553 target_pid_to_str (ptid));
3555 /* We have a specific LWP to check. */
3556 lp = find_lwp_pid (ptid);
3559 if (debug_linux_nat && lp->status)
3560 fprintf_unfiltered (gdb_stdlog,
3561 "LLW: Using pending wait status %s for %s.\n",
3562 status_to_str (lp->status),
3563 target_pid_to_str (lp->ptid));
3565 /* We check for lp->waitstatus in addition to lp->status,
3566 because we can have pending process exits recorded in
3567 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3568 an additional lp->status_p flag. */
3569 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
/* Flush a pending SIGSTOP out of a signalled, non-stop-requested LWP
   before using any other pending event, so the stray SIGSTOP cannot
   later mask a single-step SIGTRAP (scenario described below).  */
3573 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
3575 /* A pending SIGSTOP may interfere with the normal stream of
3576 events. In a typical case where interference is a problem,
3577 we have a SIGSTOP signal pending for LWP A while
3578 single-stepping it, encounter an event in LWP B, and take the
3579 pending SIGSTOP while trying to stop LWP A. After processing
3580 the event in LWP B, LWP A is continued, and we'll never see
3581 the SIGTRAP associated with the last time we were
3582 single-stepping LWP A. */
3584 /* Resume the thread. It should halt immediately returning the
3586 registers_changed ();
3587 if (linux_nat_prepare_to_resume != NULL)
3588 linux_nat_prepare_to_resume (lp);
3589 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3590 lp->step, TARGET_SIGNAL_0);
3591 if (debug_linux_nat)
3592 fprintf_unfiltered (gdb_stdlog,
3593 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3594 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3595 target_pid_to_str (lp->ptid));
3597 gdb_assert (lp->resumed);
3599 /* Catch the pending SIGSTOP. */
3600 status = lp->status;
3603 stop_wait_callback (lp, NULL);
3605 /* If the lp->status field isn't empty, we caught another signal
3606 while flushing the SIGSTOP. Return it back to the event
3607 queue of the LWP, as we already have an event to handle. */
3610 if (debug_linux_nat)
3611 fprintf_unfiltered (gdb_stdlog,
3612 "LLW: kill %s, %s\n",
3613 target_pid_to_str (lp->ptid),
3614 status_to_str (lp->status));
/* Re-queue the extra signal by re-sending it to the LWP; it will be
   reported again by a later waitpid.  */
3615 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3618 lp->status = status;
3621 if (!target_can_async_p ())
3623 /* Causes SIGINT to be passed on to the attached process. */
3627 /* But if we don't find a pending event, we'll have to wait. */
3633 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3636 - If the thread group leader exits while other threads in the
3637 thread group still exist, waitpid(TGID, ...) hangs. That
3638 waitpid won't return an exit status until the other threads
3639 in the group are reapped.
3641 - When a non-leader thread execs, that thread just vanishes
3642 without reporting an exit (so we'd hang if we waited for it
3643 explicitly in that case). The exec event is reported to
/* Poll cloned children first (__WCLONE), then plain children; both
   with WNOHANG so neither class of child can block the other.  */
3647 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3648 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3649 lwpid = my_waitpid (-1, &status, WNOHANG);
3651 if (debug_linux_nat)
3652 fprintf_unfiltered (gdb_stdlog,
3653 "LNW: waitpid(-1, ...) returned %d, %s\n",
3654 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3658 /* If this is true, then we paused LWPs momentarily, and may
3659 now have pending events to handle. */
3662 if (debug_linux_nat)
3664 fprintf_unfiltered (gdb_stdlog,
3665 "LLW: waitpid %ld received %s\n",
3666 (long) lwpid, status_to_str (status));
3669 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3671 /* STATUS is now no longer valid, use LP->STATUS instead. */
/* Event came from an LWP outside the requested PTID set: record it as
   pending on that LWP instead of reporting it now.  */
3674 if (lp && !ptid_match (lp->ptid, ptid))
3676 gdb_assert (lp->resumed);
3678 if (debug_linux_nat)
3680 "LWP %ld got an event %06x, leaving pending.\n",
3681 ptid_get_lwp (lp->ptid), lp->status);
3683 if (WIFSTOPPED (lp->status))
3685 if (WSTOPSIG (lp->status) != SIGSTOP)
3687 /* Cancel breakpoint hits. The breakpoint may
3688 be removed before we fetch events from this
3689 process to report to the core. It is best
3690 not to assume the moribund breakpoints
3691 heuristic always handles these cases --- it
3692 could be too many events go through to the
3693 core before this one is handled. All-stop
3694 always cancels breakpoint hits in all
3697 && linux_nat_lp_status_is_event (lp)
3698 && cancel_breakpoint (lp))
3700 /* Throw away the SIGTRAP. */
3703 if (debug_linux_nat)
3705 "LLW: LWP %ld hit a breakpoint while"
3706 " waiting for another process;"
3708 ptid_get_lwp (lp->ptid));
3718 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3720 if (debug_linux_nat)
3722 "Process %ld exited while stopping LWPs\n",
3723 ptid_get_lwp (lp->ptid));
3725 /* This was the last lwp in the process. Since
3726 events are serialized to GDB core, and we can't
3727 report this one right now, but GDB core and the
3728 other target layers will want to be notified
3729 about the exit code/signal, leave the status
3730 pending for the next time we're able to report
3733 /* Prevent trying to stop this thread again. We'll
3734 never try to resume it because it has a pending
3737 /* Dead LWP's aren't expected to reported a pending
3742 /* Store the pending event in the waitstatus as
3743 well, because W_EXITCODE(0,0) == 0. */
3744 store_waitstatus (&lp->waitstatus, lp->status);
3753 /* Some LWP now has a pending event. Go all the way
3754 back to check it. */
3760 /* We got an event to report to the core. */
3764 /* Retry until nothing comes out of waitpid. A single
3765 SIGCHLD can indicate more than one child stopped. */
3769 /* Check for zombie thread group leaders. Those can't be reaped
3770 until all other threads in the thread group are. */
3771 check_zombie_leaders ();
3773 /* If there are no resumed children left, bail. We'd be stuck
3774 forever in the sigsuspend call below otherwise. */
3775 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3777 if (debug_linux_nat)
3778 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3780 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3782 if (!target_can_async_p ())
3783 clear_sigint_trap ();
3785 restore_child_signals_mask (&prev_mask);
3786 return minus_one_ptid;
3789 /* No interesting event to report to the core. */
3791 if (target_options & TARGET_WNOHANG)
3793 if (debug_linux_nat)
3794 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3796 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3797 restore_child_signals_mask (&prev_mask);
3798 return minus_one_ptid;
3801 /* We shouldn't end up here unless we want to try again. */
3802 gdb_assert (lp == NULL);
/* sigsuspend atomically unblocks SIGCHLD and sleeps; we wake when a
   child changes state, then loop back to the waitpid poll above.  */
3804 /* Block until we get an event reported with SIGCHLD. */
3805 sigsuspend (&suspend_mask);
3808 if (!target_can_async_p ())
3809 clear_sigint_trap ();
3813 status = lp->status;
3816 /* Don't report signals that GDB isn't interested in, such as
3817 signals that are neither printed nor stopped upon. Stopping all
3818 threads can be a bit time-consuming so if we want decent
3819 performance with heavily multi-threaded programs, especially when
3820 they're using a high frequency timer, we'd better avoid it if we
3823 if (WIFSTOPPED (status))
3825 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
3827 /* When using hardware single-step, we need to report every signal.
3828 Otherwise, signals in pass_mask may be short-circuited. */
3830 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3832 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3833 here? It is not clear we should. GDB may not expect
3834 other threads to run. On the other hand, not resuming
3835 newly attached threads may cause an unwanted delay in
3836 getting them running. */
3837 registers_changed ();
3838 if (linux_nat_prepare_to_resume != NULL)
3839 linux_nat_prepare_to_resume (lp);
/* Short-circuit: hand the pass-through signal straight back to the
   inferior without reporting a stop to the core.  */
3840 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3842 if (debug_linux_nat)
3843 fprintf_unfiltered (gdb_stdlog,
3844 "LLW: %s %s, %s (preempt 'handle')\n",
3846 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3847 target_pid_to_str (lp->ptid),
3848 (signo != TARGET_SIGNAL_0
3849 ? strsignal (target_signal_to_host (signo))
3857 /* Only do the below in all-stop, as we currently use SIGINT
3858 to implement target_stop (see linux_nat_stop) in
3860 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3862 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3863 forwarded to the entire process group, that is, all LWPs
3864 will receive it - unless they're using CLONE_THREAD to
3865 share signals. Since we only want to report it once, we
3866 mark it as ignored for all LWPs except this one. */
3867 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3868 set_ignore_sigint, NULL);
3869 lp->ignore_sigint = 0;
3872 maybe_clear_ignore_sigint (lp);
3876 /* This LWP is stopped now. */
3879 if (debug_linux_nat)
3880 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3881 status_to_str (status), target_pid_to_str (lp->ptid));
/* All-stop mode: freeze every other LWP before reporting, then pick
   the final event thread fairly and cancel stale breakpoint hits.  */
3885 /* Now stop all other LWP's ... */
3886 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3888 /* ... and wait until all of them have reported back that
3889 they're no longer running. */
3890 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3892 /* If we're not waiting for a specific LWP, choose an event LWP
3893 from among those that have had events. Giving equal priority
3894 to all LWPs that have had events helps prevent
3896 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3897 select_event_lwp (ptid, &lp, &status);
3899 /* Now that we've selected our final event LWP, cancel any
3900 breakpoints in other LWPs that have hit a GDB breakpoint.
3901 See the comment in cancel_breakpoints_callback to find out
3903 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3905 /* We'll need this to determine whether to report a SIGSTOP as
3906 TARGET_WAITKIND_0. Need to take a copy because
3907 resume_clear_callback clears it. */
3908 last_resume_kind = lp->last_resume_kind;
3910 /* In all-stop, from the core's perspective, all LWPs are now
3911 stopped until a new resume action is sent over. */
3912 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3917 last_resume_kind = lp->last_resume_kind;
3918 resume_clear_callback (lp, NULL);
3921 if (linux_nat_status_is_event (status))
3923 if (debug_linux_nat)
3924 fprintf_unfiltered (gdb_stdlog,
3925 "LLW: trap ptid is %s.\n",
3926 target_pid_to_str (lp->ptid));
/* Prefer an extended waitstatus (fork/exec/...) recorded on the LWP
   over the raw waitpid status; consume it once reported.  */
3929 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3931 *ourstatus = lp->waitstatus;
3932 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3935 store_waitstatus (ourstatus, status);
3937 if (debug_linux_nat)
3938 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3940 restore_child_signals_mask (&prev_mask);
3942 if (last_resume_kind == resume_stop
3943 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3944 && WSTOPSIG (status) == SIGSTOP)
3946 /* A thread that has been requested to stop by GDB with
3947 target_stop, and it stopped cleanly, so report as SIG0. The
3948 use of SIGSTOP is an implementation detail. */
3949 ourstatus->value.sig = TARGET_SIGNAL_0;
3952 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3953 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3956 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3961 /* Resume LWPs that are currently stopped without any pending status
3962 to report, but are resumed from the core's perspective. */
/* iterate_over_lwps callback; DATA is the ptid being waited for.
   NOTE(review): extract is elided (braces/returns missing); comments
   cover only the visible logic.  */
3965 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3967 ptid_t *wait_ptid_p = data;
3972 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3974 struct regcache *regcache = get_thread_regcache (lp->ptid);
3975 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3976 CORE_ADDR pc = regcache_read_pc (regcache);
3978 gdb_assert (is_executing (lp->ptid));
3980 /* Don't bother if there's a breakpoint at PC that we'd hit
3981 immediately, and we're not waiting for this LWP. */
3982 if (!ptid_match (lp->ptid, *wait_ptid_p))
3984 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3988 if (debug_linux_nat)
3989 fprintf_unfiltered (gdb_stdlog,
3990 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3991 target_pid_to_str (lp->ptid),
3992 paddress (gdbarch, pc),
3995 registers_changed ();
3996 if (linux_nat_prepare_to_resume != NULL)
3997 linux_nat_prepare_to_resume (lp);
3998 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3999 lp->step, TARGET_SIGNAL_0);
/* Discard stale per-stop state now that the LWP is running again.  */
4001 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
4002 lp->stopped_by_watchpoint = 0;
/* target_ops "to_wait" entry point: wrapper around linux_nat_wait_1
   that handles async bookkeeping before and after the real wait.
   NOTE(review): extract is elided; some lines (e.g. event_ptid's
   declaration and the async_file_mark call) are missing here.  */
4009 linux_nat_wait (struct target_ops *ops,
4010 ptid_t ptid, struct target_waitstatus *ourstatus,
4015 if (debug_linux_nat)
4016 fprintf_unfiltered (gdb_stdlog,
4017 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
4019 /* Flush the async file first. */
4020 if (target_can_async_p ())
4021 async_file_flush ();
4023 /* Resume LWPs that are currently stopped without any pending status
4024 to report, but are resumed from the core's perspective. LWPs get
4025 in this state if we find them stopping at a time we're not
4026 interested in reporting the event (target_wait on a
4027 specific_process, for example, see linux_nat_wait_1), and
4028 meanwhile the event became uninteresting. Don't bother resuming
4029 LWPs we're not going to wait for if they'd stop immediately. */
4031 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4033 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
4035 /* If we requested any event, and something came out, assume there
4036 may be more. If we requested a specific lwp or process, also
4037 assume there may be more. */
4038 if (target_can_async_p ()
4039 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4040 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
4041 || !ptid_equal (ptid, minus_one_ptid)))
4044 /* Get ready for the next event. */
4045 if (target_can_async_p ())
4046 target_async (inferior_event_handler, 0);
/* iterate_over_lwps callback: forcibly kill one LWP.  Sends SIGKILL
   first, then PTRACE_KILL as a fallback for kernels that ignore
   SIGKILL on ptraced processes.  NOTE(review): extract is elided;
   the errno resets before each call are not visible here.  */
4052 kill_callback (struct lwp_info *lp, void *data)
4054 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4057 kill (GET_LWP (lp->ptid), SIGKILL);
4058 if (debug_linux_nat)
4059 fprintf_unfiltered (gdb_stdlog,
4060 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4061 target_pid_to_str (lp->ptid),
4062 errno ? safe_strerror (errno) : "OK")
4064 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4067 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4068 if (debug_linux_nat)
4069 fprintf_unfiltered (gdb_stdlog,
4070 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4071 target_pid_to_str (lp->ptid),
4072 errno ? safe_strerror (errno) : "OK");
/* iterate_over_lwps callback: reap a killed LWP, draining any queued
   events so they cannot leak into a later debug session.  Waits with
   __WCLONE and then without, re-killing in a loop until waitpid
   reports ECHILD (the kernel sometimes fails to kill a ptraced thread
   on the first attempt -- see the comment below).  NOTE(review): the
   original comments contain typos ("deliever", "reapped", "unknown"
   debug text); left byte-identical on purpose.  */
4078 kill_wait_callback (struct lwp_info *lp, void *data)
4082 /* We must make sure that there are no pending events (delayed
4083 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4084 program doesn't interfere with any following debugging session. */
4086 /* For cloned processes we must check both with __WCLONE and
4087 without, since the exit status of a cloned process isn't reported
/* First pass: reap as a cloned child.  */
4093 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4094 if (pid != (pid_t) -1)
4096 if (debug_linux_nat)
4097 fprintf_unfiltered (gdb_stdlog,
4098 "KWC: wait %s received unknown.\n",
4099 target_pid_to_str (lp->ptid));
4100 /* The Linux kernel sometimes fails to kill a thread
4101 completely after PTRACE_KILL; that goes from the stop
4102 point in do_fork out to the one in
4103 get_signal_to_deliever and waits again. So kill it
4105 kill_callback (lp, NULL);
4108 while (pid == GET_LWP (lp->ptid));
4110 gdb_assert (pid == -1 && errno == ECHILD);
/* Second pass: reap as an ordinary (non-clone) child.  */
4115 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4116 if (pid != (pid_t) -1)
4118 if (debug_linux_nat)
4119 fprintf_unfiltered (gdb_stdlog,
4120 "KWC: wait %s received unk.\n",
4121 target_pid_to_str (lp->ptid));
4122 /* See the call to kill_callback above. */
4123 kill_callback (lp, NULL);
4126 while (pid == GET_LWP (lp->ptid));
4128 gdb_assert (pid == -1 && errno == ECHILD);
/* target_ops "to_kill" entry point: kill the inferior and every LWP,
   handling pending fork children and the multi-fork case, then mourn.
   NOTE(review): extract is elided; comments cover visible logic.  */
4133 linux_nat_kill (struct target_ops *ops)
4135 struct target_waitstatus last;
4139 /* If we're stopped while forking and we haven't followed yet,
4140 kill the other task. We need to do this first because the
4141 parent will be sleeping if this is a vfork. */
4143 get_last_target_status (&last_ptid, &last);
4145 if (last.kind == TARGET_WAITKIND_FORKED
4146 || last.kind == TARGET_WAITKIND_VFORKED)
4148 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4152 if (forks_exist_p ())
4153 linux_fork_killall ();
4156 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4158 /* Stop all threads before killing them, since ptrace requires
4159 that the thread is stopped to sucessfully PTRACE_KILL. */
4160 iterate_over_lwps (ptid, stop_callback, NULL);
4161 /* ... and wait until all of them have reported back that
4162 they're no longer running. */
4163 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4165 /* Kill all LWP's ... */
4166 iterate_over_lwps (ptid, kill_callback, NULL);
4168 /* ... and wait until we've flushed all events. */
4169 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4172 target_mourn_inferior ();
/* target_ops "to_mourn_inferior": drop this process's LWP records,
   then either mourn normally or switch to another tracked fork.
   NOTE(review): the `else` between the two branches is elided.  */
4176 linux_nat_mourn_inferior (struct target_ops *ops)
4178 purge_lwp_list (ptid_get_pid (inferior_ptid));
4180 if (! forks_exist_p ())
4181 /* Normal case, no other forks available. */
4182 linux_ops->to_mourn_inferior (ops);
4184 /* Multi-fork case. The current inferior_ptid has exited, but
4185 there are other viable forks to debug. Delete the exiting
4186 one and context-switch to the first available. */
4187 linux_fork_mourn_inferior ();
4190 /* Convert a native/host siginfo object, into/from the siginfo in the
4191 layout of the inferiors' architecture. */
/* DIRECTION 0 = inferior layout -> native, nonzero = native ->
   inferior layout (inferred from the memcpy branches below;
   TODO confirm against the callback's contract).  Delegates to the
   arch-provided linux_nat_siginfo_fixup hook when one is set.  */
4194 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4198 if (linux_nat_siginfo_fixup != NULL)
4199 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4201 /* If there was no callback, or the callback didn't do anything,
4202 then just do a straight memcpy. */
4206 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4208 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
/* Transfer TARGET_OBJECT_SIGNAL_INFO: read or write the stopped
   thread's siginfo via PTRACE_GETSIGINFO/PTRACE_SETSIGINFO,
   converting between host and inferior layouts.  NOTE(review):
   extract is elided (error-return paths are missing here).  */
4213 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4214 const char *annex, gdb_byte *readbuf,
4215 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4218 struct siginfo siginfo;
4219 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4221 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4222 gdb_assert (readbuf || writebuf);
/* Prefer the LWP id when inferior_ptid carries one.  */
4224 pid = GET_LWP (inferior_ptid);
4226 pid = GET_PID (inferior_ptid);
4228 if (offset > sizeof (siginfo))
4232 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4236 /* When GDB is built as a 64-bit application, ptrace writes into
4237 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4238 inferior with a 64-bit GDB should look the same as debugging it
4239 with a 32-bit GDB, we need to convert it. GDB core always sees
4240 the converted layout, so any read/write will have to be done
4242 siginfo_fixup (&siginfo, inf_siginfo, 0);
4244 if (offset + len > sizeof (siginfo))
4245 len = sizeof (siginfo) - offset;
4247 if (readbuf != NULL)
4248 memcpy (readbuf, inf_siginfo + offset, len);
4251 memcpy (inf_siginfo + offset, writebuf, len);
4253 /* Convert back to ptrace layout before flushing it out. */
4254 siginfo_fixup (&siginfo, inf_siginfo, 1);
4257 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
/* target_ops "to_xfer_partial": route siginfo requests to
   linux_xfer_siginfo, otherwise normalize inferior_ptid to a plain
   pid and delegate to the lower ptrace layer.  */
4266 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4267 const char *annex, gdb_byte *readbuf,
4268 const gdb_byte *writebuf,
4269 ULONGEST offset, LONGEST len)
4271 struct cleanup *old_chain;
4274 if (object == TARGET_OBJECT_SIGNAL_INFO)
4275 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4278 /* The target is connected but no live inferior is selected. Pass
4279 this request down to a lower stratum (e.g., the executable
4281 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4284 old_chain = save_inferior_ptid ();
/* The lower layer expects a process ptid, not an LWP ptid.  */
4286 if (is_lwp (inferior_ptid))
4287 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4289 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4292 do_cleanups (old_chain);
/* Return whether the LWP identified by PTID still exists, probed with
   kill(pid, 0) rather than ptrace (see comment below).  NOTE(review):
   tail of the function (the return) is elided from this extract.  */
4297 linux_thread_alive (ptid_t ptid)
4301 gdb_assert (is_lwp (ptid));
4303 /* Send signal 0 instead of anything ptrace, because ptracing a
4304 running thread errors out claiming that the thread doesn't
4306 err = kill_lwp (GET_LWP (ptid), 0);
4308 if (debug_linux_nat)
4309 fprintf_unfiltered (gdb_stdlog,
4310 "LLTA: KILL(SIG0) %s (%s)\n",
4311 target_pid_to_str (ptid),
4312 err ? safe_strerror (tmp_errno) : "OK");
/* target_ops "to_thread_alive": thin wrapper over linux_thread_alive.  */
4321 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4323 return linux_thread_alive (ptid);
/* target_ops "to_pid_to_str": print "LWP N" for a thread that is not
   the sole/main thread; fall back to normal_pid_to_str otherwise.
   Returns a pointer to a static buffer (not reentrant).  */
4327 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4329 static char buf[64];
4332 && (GET_PID (ptid) != GET_LWP (ptid)
4333 || num_lwps (GET_PID (ptid)) > 1))
4335 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4339 return normal_pid_to_str (ptid);
/* target_ops "to_thread_name": read the thread's name from
   /proc/PID/task/LWP/comm.  NOTE(review): the tail (stripping the
   trailing newline via NL, fclose, and the return) is elided from
   this extract.  */
4343 linux_nat_thread_name (struct thread_info *thr)
4345 int pid = ptid_get_pid (thr->ptid);
4346 long lwp = ptid_get_lwp (thr->ptid);
4347 #define FORMAT "/proc/%d/task/%ld/comm"
4348 char buf[sizeof (FORMAT) + 30]
4350 char *result = NULL;
4352 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4353 comm_file = fopen (buf, "r");
4356 /* Not exported by the kernel, so we define it here. */
/* Static buffer: returned name is only valid until the next call.  */
4358 static char line[COMM_LEN + 1];
4360 if (fgets (line, sizeof (line), comm_file))
4362 char *nl = strchr (line, '\n');
4379 /* Accepts an integer PID; Returns a string representing a file that
4380 can be opened to get the symbols for the child process. */
/* Resolves /proc/PID/exe via readlink; both temporaries are freed by
   the cleanup chain.  NOTE(review): the return paths are elided from
   this extract.  */
4383 linux_child_pid_to_exec_file (int pid)
4385 char *name1, *name2;
4387 name1 = xmalloc (MAXPATHLEN);
4388 name2 = xmalloc (MAXPATHLEN);
4389 make_cleanup (xfree, name1);
4390 make_cleanup (xfree, name2);
/* readlink does not NUL-terminate; pre-zero the buffer instead.  */
4391 memset (name2, 0, MAXPATHLEN);
4393 sprintf (name1, "/proc/%d/exe", pid);
4394 if (readlink (name1, name2, MAXPATHLEN) > 0)
4400 /* Service function for corefiles and info proc. */
/* Parse one line of a /proc/PID/maps file into its fields; FILENAME
   receives the rest of the line (may be empty).  Returns nonzero on a
   successful parse, zero/EOF-stop otherwise.  */
4403 read_mapping (FILE *mapfile,
4408 char *device, long long *inode, char *filename)
4410 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4411 addr, endaddr, permissions, offset, device, inode);
4414 if (ret > 0 && ret != EOF)
4416 /* Eat everything up to EOL for the filename. This will prevent
4417 weird filenames (such as one with embedded whitespace) from
4418 confusing this code. It also makes this code more robust in
4419 respect to annotations the kernel may add after the filename.
4421 Note the filename is used for informational purposes
4423 ret += fscanf (mapfile, "%[^\n]\n", filename);
4426 return (ret != 0 && ret != EOF);
4429 /* Fills the "to_find_memory_regions" target vector. Lists the memory
4430 regions in the inferior for a corefile. */
/* Walks /proc/PID/maps with read_mapping and invokes FUNC once per
   mapped segment with its address, size, and r/w/x permissions.
   NOTE(review): extract is elided; the return is not visible.  */
4433 linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
4435 int pid = PIDGET (inferior_ptid);
4436 char mapsfilename[MAXPATHLEN];
4438 long long addr, endaddr, size, offset, inode;
4439 char permissions[8], device[8], filename[MAXPATHLEN];
4440 int read, write, exec;
4441 struct cleanup *cleanup;
4443 /* Compose the filename for the /proc memory map, and open it. */
4444 sprintf (mapsfilename, "/proc/%d/maps", pid);
4445 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
4446 error (_("Could not open %s."), mapsfilename);
4447 cleanup = make_cleanup_fclose (mapsfile);
4450 fprintf_filtered (gdb_stdout,
4451 "Reading memory regions from %s\n", mapsfilename);
4453 /* Now iterate until end-of-file. */
4454 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4455 &offset, &device[0], &inode, &filename[0]))
4457 size = endaddr - addr;
4459 /* Get the segment's permissions. */
4460 read = (strchr (permissions, 'r') != 0);
4461 write = (strchr (permissions, 'w') != 0);
4462 exec = (strchr (permissions, 'x') != 0);
4466 fprintf_filtered (gdb_stdout,
4467 "Save segment, %s bytes at %s (%c%c%c)",
4468 plongest (size), paddress (target_gdbarch, addr),
4470 write ? 'w' : ' ', exec ? 'x' : ' ');
4472 fprintf_filtered (gdb_stdout, " for %s", filename);
4473 fprintf_filtered (gdb_stdout, "\n");
4476 /* Invoke the callback function to create the corefile
4478 func (addr, size, read, write, exec, obfd);
4480 do_cleanups (cleanup);
/* iterate_over_threads predicate: matches a thread of the current
   inferior that stopped with a non-trivial signal.  */
4485 find_signalled_thread (struct thread_info *info, void *data)
4487 if (info->suspend.stop_signal != TARGET_SIGNAL_0
4488 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
/* Return the signal that stopped some thread of the current inferior,
   or TARGET_SIGNAL_0 if no thread is recorded as signalled.  Used to
   fill the corefile's stop-signal field.  */
4494 static enum target_signal
4495 find_stop_signal (void)
4497 struct thread_info *info =
4498 iterate_over_threads (find_signalled_thread, NULL);
4501 return info->suspend.stop_signal;
4503 return TARGET_SIGNAL_0;
4506 /* Records the thread's register state for the corefile note
/* Appends register notes for the thread PTID to NOTE_DATA/NOTE_SIZE:
   either via the gdbarch core_regset_sections list (new path) or via
   fill_gregset/fill_fpregset (legacy fallback).  Returns the updated
   note_data pointer.  NOTE(review): extract is elided; some branch
   and cleanup lines are missing here.  */
4510 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
4511 char *note_data, int *note_size,
4512 enum target_signal stop_signal)
4514 unsigned long lwp = ptid_get_lwp (ptid);
4515 struct gdbarch *gdbarch = target_gdbarch;
4516 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4517 const struct regset *regset;
4519 struct cleanup *old_chain;
4520 struct core_regset_section *sect_list;
/* Temporarily switch inferior_ptid so the fetch targets PTID.  */
4523 old_chain = save_inferior_ptid ();
4524 inferior_ptid = ptid;
4525 target_fetch_registers (regcache, -1);
4526 do_cleanups (old_chain);
4528 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4529 sect_list = gdbarch_core_regset_sections (gdbarch);
4531 /* The loop below uses the new struct core_regset_section, which stores
4532 the supported section names and sizes for the core file. Note that
4533 note PRSTATUS needs to be treated specially. But the other notes are
4534 structurally the same, so they can benefit from the new struct. */
4535 if (core_regset_p && sect_list != NULL)
4536 while (sect_list->sect_name != NULL)
4538 regset = gdbarch_regset_from_core_section (gdbarch,
4539 sect_list->sect_name,
4541 gdb_assert (regset && regset->collect_regset);
4542 gdb_regset = xmalloc (sect_list->size);
4543 regset->collect_regset (regset, regcache, -1,
4544 gdb_regset, sect_list->size);
4546 if (strcmp (sect_list->sect_name, ".reg") == 0)
4547 note_data = (char *) elfcore_write_prstatus
4548 (obfd, note_data, note_size,
4549 lwp, target_signal_to_host (stop_signal),
4552 note_data = (char *) elfcore_write_register_note
4553 (obfd, note_data, note_size,
4554 sect_list->sect_name, gdb_regset,
4560 /* For architectures that does not have the struct core_regset_section
4561 implemented, we use the old method. When all the architectures have
4562 the new support, the code below should be deleted. */
4565 gdb_gregset_t gregs;
4566 gdb_fpregset_t fpregs;
4569 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4571 != NULL && regset->collect_regset != NULL)
4572 regset->collect_regset (regset, regcache, -1,
4573 &gregs, sizeof (gregs));
4575 fill_gregset (regcache, &gregs, -1);
4577 note_data = (char *) elfcore_write_prstatus
4578 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4582 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4584 != NULL && regset->collect_regset != NULL)
4585 regset->collect_regset (regset, regcache, -1,
4586 &fpregs, sizeof (fpregs));
4588 fill_fpregset (regcache, &fpregs, -1);
4590 note_data = (char *) elfcore_write_prfpreg (obfd,
4593 &fpregs, sizeof (fpregs));
/* Argument bundle threaded through the per-LWP corefile note
   callback.  NOTE(review): most members (obfd, note_data, note_size,
   num_notes) are elided from this extract.  */
4599 struct linux_nat_corefile_thread_data
4605 enum target_signal stop_signal;
4608 /* Called by gdbthread.c once per thread. Records the thread's
4609 register state for the corefile note section. */
/* iterate_over_lwps callback; DATA is a linux_nat_corefile_thread_data.
   Delegates to linux_nat_do_thread_registers and accumulates the
   growing note buffer back into ARGS.  */
4612 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4614 struct linux_nat_corefile_thread_data *args = data;
4616 args->note_data = linux_nat_do_thread_registers (args->obfd,
4626 /* Enumerate spufs IDs for process PID. */
/* Scans /proc/PID/fd for file descriptors that are directories on a
   spufs mount (checked via statfs f_type == SPUFS_MAGIC) and invokes
   CALLBACK with each such fd.  NOTE(review): extract is elided; the
   early-return for a failed opendir and the closedir are missing.  */
4629 iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4633 struct dirent *entry;
4635 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4636 dir = opendir (path);
4641 while ((entry = readdir (dir)) != NULL)
4647 fd = atoi (entry->d_name);
4651 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4652 if (stat (path, &st) != 0)
4654 if (!S_ISDIR (st.st_mode))
4657 if (statfs (path, &stfs) != 0)
4659 if (stfs.f_type != SPUFS_MAGIC)
4662 callback (data, fd);
4668 /* Generate corefile notes for SPU contexts. */
/* Argument bundle for linux_spu_corefile_callback.  NOTE(review): the
   members (obfd, note_data, note_size) are elided from this extract.  */
4670 struct linux_spu_corefile_data
/* iterate_over_spus callback: for the SPU context behind FD, read each
   file in spu_files[] via the TARGET_OBJECT_SPU interface and append
   it as a "SPU/<fd>/<file>" NT_SPU note.  NOTE(review): the spu_files
   table contents and the xfree of spu_data are elided here.  */
4678 linux_spu_corefile_callback (void *data, int fd)
4680 struct linux_spu_corefile_data *args = data;
4683 static const char *spu_files[] =
4705 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4707 char annex[32], note_name[32];
4711 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4712 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4716 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4717 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4718 args->note_size, note_name,
4719 NT_SPU, spu_data, spu_len);
/* Append SPU-context notes for the current inferior to
   NOTE_DATA/NOTE_SIZE and return the updated buffer pointer.  */
4726 linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4728 struct linux_spu_corefile_data args;
4731 args.note_data = note_data;
4732 args.note_size = note_size;
4734 iterate_over_spus (PIDGET (inferior_ptid),
4735 linux_spu_corefile_callback, &args);
4737 return args.note_data;
4740 /* Fills the "to_make_corefile_note" target vector. Builds the note
4741 section for a corefile, and returns it in a malloc buffer. */
/* Assembles, in order: prpsinfo (process name/args), per-thread
   register notes, the auxv vector, and SPU notes.  NOTE(review):
   extract is elided; the return statement and the xfree of the auxv
   buffer are not visible here.  */
4744 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4746 struct linux_nat_corefile_thread_data thread_args;
4747 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
4748 char fname[16] = { '\0' };
4749 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
4750 char psargs[80] = { '\0' };
4751 char *note_data = NULL;
4752 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
4756 if (get_exec_file (0))
4758 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
4759 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4760 if (get_inferior_args ())
4763 char *psargs_end = psargs + sizeof (psargs);
4765 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4767 string_end = memchr (psargs, 0, sizeof (psargs));
4768 if (string_end != NULL)
4770 *string_end++ = ' ';
4771 strncpy (string_end, get_inferior_args (),
4772 psargs_end - string_end);
4775 note_data = (char *) elfcore_write_prpsinfo (obfd,
4777 note_size, fname, psargs);
4780 /* Dump information for threads. */
4781 thread_args.obfd = obfd;
4782 thread_args.note_data = note_data;
4783 thread_args.note_size = note_size;
4784 thread_args.num_notes = 0;
4785 thread_args.stop_signal = find_stop_signal ();
4786 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
4787 gdb_assert (thread_args.num_notes != 0);
4788 note_data = thread_args.note_data;
4790 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4794 note_data = elfcore_write_note (obfd, note_data, note_size,
4795 "CORE", NT_AUXV, auxv, auxv_len);
4799 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4801 make_cleanup (xfree, note_data);
4805 /* Implement the "info proc" command. */
/* NOTE(review): the enum declaration and its IP_* enumerator lines
   are elided from this extract; only the per-value comments remain.  */
4809 /* Display the default cmdline, cwd and exe outputs. */
4812 /* Display `info proc mappings'. */
4815 /* Display `info proc status'. */
4818 /* Display `info proc stat'. */
4821 /* Display `info proc cmdline'. */
4824 /* Display `info proc exe'. */
4827 /* Display `info proc cwd'. */
4830 /* Display all of the above. */
4835 linux_nat_info_proc_cmd_1 (char *args, enum info_proc_what what, int from_tty)
4837 /* A long is used for pid instead of an int to avoid a loss of precision
4838 compiler warning from the output of strtoul. */
4839 long pid = PIDGET (inferior_ptid);
4841 char buffer[MAXPATHLEN];
4842 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4843 int cmdline_f = (what == IP_MINIMAL || what == IP_CMDLINE || what == IP_ALL);
4844 int cwd_f = (what == IP_MINIMAL || what == IP_CWD || what == IP_ALL);
4845 int exe_f = (what == IP_MINIMAL || what == IP_EXE || what == IP_ALL);
4846 int mappings_f = (what == IP_MAPPINGS || what == IP_ALL);
4847 int status_f = (what == IP_STATUS || what == IP_ALL);
4848 int stat_f = (what == IP_STAT || what == IP_ALL);
4851 if (args && isdigit (args[0]))
4852 pid = strtoul (args, &args, 10);
4854 args = skip_spaces (args);
4855 if (args && args[0])
4856 error (_("Too many parameters: %s"), args);
4859 error (_("No current process: you must name one."));
4861 sprintf (fname1, "/proc/%ld", pid);
4862 if (stat (fname1, &dummy) != 0)
4863 error (_("No /proc directory: '%s'"), fname1);
4865 printf_filtered (_("process %ld\n"), pid);
4868 sprintf (fname1, "/proc/%ld/cmdline", pid);
4869 if ((procfile = fopen (fname1, "r")) != NULL)
4871 struct cleanup *cleanup = make_cleanup_fclose (procfile);
4873 if (fgets (buffer, sizeof (buffer), procfile))
4874 printf_filtered ("cmdline = '%s'\n", buffer);
4876 warning (_("unable to read '%s'"), fname1);
4877 do_cleanups (cleanup);
4880 warning (_("unable to open /proc file '%s'"), fname1);
4884 sprintf (fname1, "/proc/%ld/cwd", pid);
4885 memset (fname2, 0, sizeof (fname2));
4886 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4887 printf_filtered ("cwd = '%s'\n", fname2);
4889 warning (_("unable to read link '%s'"), fname1);
4893 sprintf (fname1, "/proc/%ld/exe", pid);
4894 memset (fname2, 0, sizeof (fname2));
4895 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4896 printf_filtered ("exe = '%s'\n", fname2);
4898 warning (_("unable to read link '%s'"), fname1);
4902 sprintf (fname1, "/proc/%ld/maps", pid);
4903 if ((procfile = fopen (fname1, "r")) != NULL)
4905 long long addr, endaddr, size, offset, inode;
4906 char permissions[8], device[8], filename[MAXPATHLEN];
4907 struct cleanup *cleanup;
4909 cleanup = make_cleanup_fclose (procfile);
4910 printf_filtered (_("Mapped address spaces:\n\n"));
4911 if (gdbarch_addr_bit (target_gdbarch) == 32)
4913 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4916 " Size", " Offset", "objfile");
4920 printf_filtered (" %18s %18s %10s %10s %7s\n",
4923 " Size", " Offset", "objfile");
4926 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4927 &offset, &device[0], &inode, &filename[0]))
4929 size = endaddr - addr;
4931 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4932 calls here (and possibly above) should be abstracted
4933 out into their own functions? Andrew suggests using
4934 a generic local_address_string instead to print out
4935 the addresses; that makes sense to me, too. */
4937 if (gdbarch_addr_bit (target_gdbarch) == 32)
4939 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4940 (unsigned long) addr, /* FIXME: pr_addr */
4941 (unsigned long) endaddr,
4943 (unsigned int) offset,
4944 filename[0] ? filename : "");
4948 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4949 (unsigned long) addr, /* FIXME: pr_addr */
4950 (unsigned long) endaddr,
4952 (unsigned int) offset,
4953 filename[0] ? filename : "");
4957 do_cleanups (cleanup);
4960 warning (_("unable to open /proc file '%s'"), fname1);
4964 sprintf (fname1, "/proc/%ld/status", pid);
4965 if ((procfile = fopen (fname1, "r")) != NULL)
4967 struct cleanup *cleanup = make_cleanup_fclose (procfile);
4969 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4970 puts_filtered (buffer);
4971 do_cleanups (cleanup);
4974 warning (_("unable to open /proc file '%s'"), fname1);
4978 sprintf (fname1, "/proc/%ld/stat", pid);
4979 if ((procfile = fopen (fname1, "r")) != NULL)
4984 struct cleanup *cleanup = make_cleanup_fclose (procfile);
4986 if (fscanf (procfile, "%d ", &itmp) > 0)
4987 printf_filtered (_("Process: %d\n"), itmp);
4988 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
4989 printf_filtered (_("Exec file: %s\n"), buffer);
4990 if (fscanf (procfile, "%c ", &ctmp) > 0)
4991 printf_filtered (_("State: %c\n"), ctmp);
4992 if (fscanf (procfile, "%d ", &itmp) > 0)
4993 printf_filtered (_("Parent process: %d\n"), itmp);
4994 if (fscanf (procfile, "%d ", &itmp) > 0)
4995 printf_filtered (_("Process group: %d\n"), itmp);
4996 if (fscanf (procfile, "%d ", &itmp) > 0)
4997 printf_filtered (_("Session id: %d\n"), itmp);
4998 if (fscanf (procfile, "%d ", &itmp) > 0)
4999 printf_filtered (_("TTY: %d\n"), itmp);
5000 if (fscanf (procfile, "%d ", &itmp) > 0)
5001 printf_filtered (_("TTY owner process group: %d\n"), itmp);
5002 if (fscanf (procfile, "%lu ", <mp) > 0)
5003 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
5004 if (fscanf (procfile, "%lu ", <mp) > 0)
5005 printf_filtered (_("Minor faults (no memory page): %lu\n"),
5006 (unsigned long) ltmp);
5007 if (fscanf (procfile, "%lu ", <mp) > 0)
5008 printf_filtered (_("Minor faults, children: %lu\n"),
5009 (unsigned long) ltmp);
5010 if (fscanf (procfile, "%lu ", <mp) > 0)
5011 printf_filtered (_("Major faults (memory page faults): %lu\n"),
5012 (unsigned long) ltmp);
5013 if (fscanf (procfile, "%lu ", <mp) > 0)
5014 printf_filtered (_("Major faults, children: %lu\n"),
5015 (unsigned long) ltmp);
5016 if (fscanf (procfile, "%ld ", <mp) > 0)
5017 printf_filtered (_("utime: %ld\n"), ltmp);
5018 if (fscanf (procfile, "%ld ", <mp) > 0)
5019 printf_filtered (_("stime: %ld\n"), ltmp);
5020 if (fscanf (procfile, "%ld ", <mp) > 0)
5021 printf_filtered (_("utime, children: %ld\n"), ltmp);
5022 if (fscanf (procfile, "%ld ", <mp) > 0)
5023 printf_filtered (_("stime, children: %ld\n"), ltmp);
5024 if (fscanf (procfile, "%ld ", <mp) > 0)
5025 printf_filtered (_("jiffies remaining in current "
5026 "time slice: %ld\n"), ltmp);
5027 if (fscanf (procfile, "%ld ", <mp) > 0)
5028 printf_filtered (_("'nice' value: %ld\n"), ltmp);
5029 if (fscanf (procfile, "%lu ", <mp) > 0)
5030 printf_filtered (_("jiffies until next timeout: %lu\n"),
5031 (unsigned long) ltmp);
5032 if (fscanf (procfile, "%lu ", <mp) > 0)
5033 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
5034 (unsigned long) ltmp);
5035 if (fscanf (procfile, "%ld ", <mp) > 0)
5036 printf_filtered (_("start time (jiffies since "
5037 "system boot): %ld\n"), ltmp);
5038 if (fscanf (procfile, "%lu ", <mp) > 0)
5039 printf_filtered (_("Virtual memory size: %lu\n"),
5040 (unsigned long) ltmp);
5041 if (fscanf (procfile, "%lu ", <mp) > 0)
5042 printf_filtered (_("Resident set size: %lu\n"),
5043 (unsigned long) ltmp);
5044 if (fscanf (procfile, "%lu ", <mp) > 0)
5045 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
5046 if (fscanf (procfile, "%lu ", <mp) > 0)
5047 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
5048 if (fscanf (procfile, "%lu ", <mp) > 0)
5049 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
5050 if (fscanf (procfile, "%lu ", <mp) > 0)
5051 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
5052 #if 0 /* Don't know how architecture-dependent the rest is...
5053 Anyway the signal bitmap info is available from "status". */
5054 if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
5055 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
5056 if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
5057 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
5058 if (fscanf (procfile, "%ld ", <mp) > 0)
5059 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
5060 if (fscanf (procfile, "%ld ", <mp) > 0)
5061 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
5062 if (fscanf (procfile, "%ld ", <mp) > 0)
5063 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
5064 if (fscanf (procfile, "%ld ", <mp) > 0)
5065 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
5066 if (fscanf (procfile, "%lu ", <mp) > 0) /* FIXME arch? */
5067 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
5069 do_cleanups (cleanup);
5072 warning (_("unable to open /proc file '%s'"), fname1);
5076 /* Implement `info proc' when given without any futher parameters. */
5079 linux_nat_info_proc_cmd (char *args, int from_tty)
5081 linux_nat_info_proc_cmd_1 (args, IP_MINIMAL, from_tty);
5084 /* Implement `info proc mappings'. */
5087 linux_nat_info_proc_cmd_mappings (char *args, int from_tty)
5089 linux_nat_info_proc_cmd_1 (args, IP_MAPPINGS, from_tty);
5092 /* Implement `info proc stat'. */
5095 linux_nat_info_proc_cmd_stat (char *args, int from_tty)
5097 linux_nat_info_proc_cmd_1 (args, IP_STAT, from_tty);
5100 /* Implement `info proc status'. */
5103 linux_nat_info_proc_cmd_status (char *args, int from_tty)
5105 linux_nat_info_proc_cmd_1 (args, IP_STATUS, from_tty);
5108 /* Implement `info proc cwd'. */
5111 linux_nat_info_proc_cmd_cwd (char *args, int from_tty)
5113 linux_nat_info_proc_cmd_1 (args, IP_CWD, from_tty);
5116 /* Implement `info proc cmdline'. */
5119 linux_nat_info_proc_cmd_cmdline (char *args, int from_tty)
5121 linux_nat_info_proc_cmd_1 (args, IP_CMDLINE, from_tty);
5124 /* Implement `info proc exe'. */
5127 linux_nat_info_proc_cmd_exe (char *args, int from_tty)
5129 linux_nat_info_proc_cmd_1 (args, IP_EXE, from_tty);
5132 /* Implement `info proc all'. */
5135 linux_nat_info_proc_cmd_all (char *args, int from_tty)
5137 linux_nat_info_proc_cmd_1 (args, IP_ALL, from_tty);
5140 /* Implement the to_xfer_partial interface for memory reads using the /proc
5141 filesystem. Because we can use a single read() call for /proc, this
5142 can be much more efficient than banging away at PTRACE_PEEKTEXT,
5143 but it doesn't support writes. */
5146 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
5147 const char *annex, gdb_byte *readbuf,
5148 const gdb_byte *writebuf,
5149 ULONGEST offset, LONGEST len)
5155 if (object != TARGET_OBJECT_MEMORY || !readbuf)
5158 /* Don't bother for one word. */
5159 if (len < 3 * sizeof (long))
5162 /* We could keep this file open and cache it - possibly one per
5163 thread. That requires some juggling, but is even faster. */
5164 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
5165 fd = open (filename, O_RDONLY | O_LARGEFILE);
5169 /* If pread64 is available, use it. It's faster if the kernel
5170 supports it (only one syscall), and it's 64-bit safe even on
5171 32-bit platforms (for instance, SPARC debugging a SPARC64
5174 if (pread64 (fd, readbuf, len, offset) != len)
5176 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
/* Enumerate spufs IDs for process PID.  Scans /proc/PID/fd for file
   descriptors that are directories residing on an spufs mount, and
   encodes each such fd as a 4-byte target-order integer into BUF,
   honoring the OFFSET/LEN window requested by the caller.  */
spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
  /* Target byte order is needed to encode the IDs into BUF.  */
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
  LONGEST written = 0;
  struct dirent *entry;

  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);

  while ((entry = readdir (dir)) != NULL)
      /* Non-numeric directory entries ("." and "..") yield fd 0 and
	 are skipped below.  */
      fd = atoi (entry->d_name);

      /* The candidate must be a directory ...  */
      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
      if (!S_ISDIR (st.st_mode))

      /* ... on a filesystem whose magic identifies spufs.  */
      if (statfs (path, &stfs) != 0)
      if (stfs.f_type != SPUFS_MAGIC)

      /* Store the ID only if it falls inside the requested window.  */
      if (pos >= offset && pos + 4 <= offset + len)
	  store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte *readbuf,
		     const gdb_byte *writebuf,
		     ULONGEST offset, LONGEST len)
  int pid = PIDGET (inferior_ptid);

  /* An empty annex requests the list of SPU context IDs rather than
     the contents of a particular spufs file.  */
    return spu_enumerate_spu_ids (pid, readbuf, offset, len);

  /* Otherwise ANNEX names an "fd/file" path below /proc/PID/fd.  */
  xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
  /* Open for writing when the caller supplied WRITEBUF, else for
     reading.  */
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);

    && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)

    ret = write (fd, writebuf, (size_t) len);
    ret = read (fd, readbuf, (size_t) len);
/* Parse LINE as a signal set and add its set bits to SIGS.  LINE is
   a newline-terminated string of hex digits as found in the Sig*
   fields of /proc/PID/status; each hex digit covers four consecutive
   signal numbers.  Raises an error on malformed input.  */
add_line_to_sigset (const char *line, sigset_t *sigs)
  /* Index of the last character, which must be the newline.  */
  int len = strlen (line) - 1;

  if (line[len] != '\n')
    error (_("Could not parse signal set: %s"), line);

      /* Decode one hex digit; only lowercase a-f appear in /proc.  */
      if (*p >= '0' && *p <= '9')
      else if (*p >= 'a' && *p <= 'f')
	digit = *p - 'a' + 10;
	error (_("Could not parse signal set: %s"), line);

	/* Each bit of the digit selects one of four consecutive
	   signal numbers (signal numbers are 1-based).  */
	sigaddset (sigs, signum + 1);
	sigaddset (sigs, signum + 2);
	sigaddset (sigs, signum + 3);
	sigaddset (sigs, signum + 4);
5320 /* Find process PID's pending signals from /proc/pid/status and set
5324 linux_proc_pending_signals (int pid, sigset_t *pending,
5325 sigset_t *blocked, sigset_t *ignored)
5328 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
5329 struct cleanup *cleanup;
5331 sigemptyset (pending);
5332 sigemptyset (blocked);
5333 sigemptyset (ignored);
5334 sprintf (fname, "/proc/%d/status", pid);
5335 procfile = fopen (fname, "r");
5336 if (procfile == NULL)
5337 error (_("Could not open %s"), fname);
5338 cleanup = make_cleanup_fclose (procfile);
5340 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5342 /* Normal queued signals are on the SigPnd line in the status
5343 file. However, 2.6 kernels also have a "shared" pending
5344 queue for delivering signals to a thread group, so check for
5347 Unfortunately some Red Hat kernels include the shared pending
5348 queue but not the ShdPnd status field. */
5350 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5351 add_line_to_sigset (buffer + 8, pending);
5352 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5353 add_line_to_sigset (buffer + 8, pending);
5354 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5355 add_line_to_sigset (buffer + 8, blocked);
5356 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5357 add_line_to_sigset (buffer + 8, ignored);
5360 do_cleanups (cleanup);
5364 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
5365 const char *annex, gdb_byte *readbuf,
5366 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5368 gdb_assert (object == TARGET_OBJECT_OSDATA);
5370 return linux_common_xfer_osdata (annex, readbuf, offset, len);
/* Implement the to_xfer_partial target method: dispatch OBJECT to the
   appropriate specialized reader, then fall back to the inherited
   (ptrace-based) transfer method.  */
linux_xfer_partial (struct target_ops *ops, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,

  if (object == TARGET_OBJECT_SPU)
    return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,

  /* GDB calculates all the addresses in possibly larger width of the address.
     Address width needs to be masked before its final use - either by
     linux_proc_xfer_partial or inf_ptrace_xfer_partial.

     Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */

  if (object == TARGET_OBJECT_MEMORY)
      int addr_bit = gdbarch_addr_bit (target_gdbarch);

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;

  /* Try the fast /proc/PID/mem path first; fall back to the
     inherited method when it transfers nothing.  */
  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  */

/* Install the Linux-specific child-process target methods on T,
   chaining T's previous to_xfer_partial into super_xfer_partial.  */
linux_target_install_ops (struct target_ops *t)
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_find_memory_regions = linux_nat_find_memory_regions;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Remember the inherited transfer method so linux_xfer_partial can
     delegate to it.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;
  /* NOTE(review): the header of this definition is not visible in
     this chunk; from the body it appears to be the plain
     linux_target () constructor — confirm against the full file.  */
  struct target_ops *t;

  /* Build on the generic ptrace target and layer the Linux methods
     on top.  */
  t = inf_ptrace_target ();
  linux_target_install_ops (t);
/* Create a GNU/Linux target for "traditional" U-area layouts;
   REGISTER_U_OFFSET maps (gdbarch, regnum, ?) to an offset within the
   upage.  */
linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
  struct target_ops *t;

  t = inf_ptrace_trad_target (register_u_offset);
  linux_target_install_ops (t);
/* target_is_async_p implementation.  */

linux_nat_is_async_p (void)
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
/* target_can_async_p implementation.  */

linux_nat_can_async_p (void)
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
5484 linux_nat_supports_non_stop (void)
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

/* target_supports_multi_process implementation: report the
   file-level linux_multi_process setting.  */
linux_nat_supports_multi_process (void)
  return linux_multi_process;
/* target_supports_disable_randomization implementation: only
   supported when personality(2) support was detected at configure
   time (body elided in this chunk).  */
linux_nat_supports_disable_randomization (void)
#ifdef HAVE_PERSONALITY
/* Nonzero while GDB (not the inferior) owns the terminal in async
   mode; GDB starts out owning it.  */
static int async_terminal_is_ours = 1;

/* target_terminal_inferior implementation.  */

linux_nat_terminal_inferior (void)
  if (!target_is_async_p ())
      /* Async mode is disabled.  */
      terminal_inferior ();

  terminal_inferior ();

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)

  /* Stop processing stdin events while the inferior owns the
     terminal.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
/* target_terminal_ours implementation.  */

linux_nat_terminal_ours (void)
  if (!target_is_async_p ())
      /* Async mode is disabled.  */

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */

  if (async_terminal_is_ours)

  /* Re-arm SIGINT delivery to GDB and resume stdin processing now
     that GDB owns the terminal again.  */
  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
/* Async-mode client callback and its context, registered by
   linux_nat_async.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
static void *async_client_context;

/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.  */

sigchld_handler (int signo)
  /* Preserve errno: this runs in signal context and must not clobber
     the interrupted code's errno.  */
  int old_errno = errno;

  if (debug_linux_nat)
    ui_file_write_async_safe (gdb_stdlog,
			      "sigchld\n", sizeof ("sigchld\n") - 1);

  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */
/* Callback registered with the target events file descriptor.  */

handle_target_event (int error, gdb_client_data client_data)
  /* Forward the event to whoever registered via linux_nat_async.  */
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
/* Create/destroy the target events pipe.  Returns previous state.  */

linux_async_pipe (int enable)
  int previous = (linux_nat_event_pipe[0] != -1);

  if (previous != enable)
      /* Block SIGCHLD while the pipe changes state so the handler
	 never sees a half-initialized pipe.  */
      block_child_signals (&prev_mask);

      if (pipe (linux_nat_event_pipe) == -1)
	internal_error (__FILE__, __LINE__,
			"creating event pipe failed.");

      /* Both ends are non-blocking: the signal handler writes and the
	 event loop drains without ever stalling.  */
      fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
      fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);

      close (linux_nat_event_pipe[0]);
      close (linux_nat_event_pipe[1]);
      linux_nat_event_pipe[0] = -1;
      linux_nat_event_pipe[1] = -1;

      restore_child_signals_mask (&prev_mask);
/* target_async implementation.  A non-NULL CALLBACK enables async
   mode (creating the event pipe and registering it with the event
   loop); a NULL CALLBACK tears async mode down.  */

linux_nat_async (void (*callback) (enum inferior_event_type event_type,
				   void *context), void *context)
  if (callback != NULL)
      async_client_callback = callback;
      async_client_context = context;
      if (!linux_async_pipe (1))
	  add_file_handler (linux_nat_event_pipe[0],
			    handle_target_event, NULL);
	  /* There may be pending events to handle.  Tell the event loop
	     to poll them.  */

      async_client_callback = callback;
      async_client_context = context;
      delete_file_handler (linux_nat_event_pipe[0]);
      linux_async_pipe (0);
/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
   event arrives meanwhile.  iterate_over_lwps callback.  */

linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
  ptid_t ptid = lwp->ptid;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNSL: running -> suspending %s\n",
			target_pid_to_str (lwp->ptid));

  /* Already being stopped on our behalf?  Then there is nothing more
     to do for this LWP.  */
  if (lwp->last_resume_kind == resume_stop)
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "linux-nat: already stopping LWP %ld at "
			    ptid_get_lwp (lwp->ptid));

  stop_callback (lwp, NULL);
  lwp->last_resume_kind = resume_stop;

  /* Already known to be stopped; do nothing.  */

  if (debug_linux_nat)
      if (find_thread_ptid (lwp->ptid)->stop_requested)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: already stopped/stop_requested %s\n",
			    target_pid_to_str (lwp->ptid));
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: already stopped/no "
			    "stop_requested yet %s\n",
			    target_pid_to_str (lwp->ptid));
/* target_stop implementation: in non-stop mode stop each matching
   LWP ourselves, otherwise delegate to the underlying target.  */
linux_nat_stop (ptid_t ptid)
  iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);

  linux_ops->to_stop (ptid);
/* target_close implementation: shut down async mode, then chain to
   the underlying target's close method, if any.  */
linux_nat_close (int quitting)
  /* Unregister from the event loop.  */
  if (target_is_async_p ())
    target_async (NULL, 0);

  if (linux_ops->to_close)
    linux_ops->to_close (quitting);
/* When requests are passed down from the linux-nat layer to the
   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
   used.  The address space pointer is stored in the inferior object,
   but the common code that is passed such ptid can't tell whether
   lwpid is a "main" process id or not (it assumes so).  We reverse
   look up the "main" process id from the lwp here.  */

struct address_space *
linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
  struct lwp_info *lwp;
  struct inferior *inf;

  pid = GET_LWP (ptid);
  if (GET_LWP (ptid) == 0)
      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
	 main process id.  */
      lwp = find_lwp_pid (ptid);
      pid = GET_PID (lwp->ptid);

      /* A (pid,lwpid,0) ptid.  */
      pid = GET_PID (ptid);

  inf = find_inferior_pid (pid);
  gdb_assert (inf != NULL);
/* Parse /proc/PID/task/LWP/stat to find the processor core PTID last
   ran on.  Returns the core number, or a failure value when the stat
   file cannot be read or parsed (early-return paths elided in this
   chunk).  */
linux_nat_core_of_thread_1 (ptid_t ptid)
  struct cleanup *back_to;
  char *content = NULL;
  int content_read = 0;

  filename = xstrprintf ("/proc/%d/task/%ld/stat",
			 GET_PID (ptid), GET_LWP (ptid));
  back_to = make_cleanup (xfree, filename);

  f = fopen (filename, "r");
      do_cleanups (back_to);

  make_cleanup_fclose (f);

      /* Slurp the whole file in 1K chunks; /proc files do not report
	 a useful size up front.  */
      content = xrealloc (content, content_read + 1024);
      n = fread (content + content_read, 1, 1024, f);

  content[content_read] = '\0';

  make_cleanup (xfree, content);

  /* Skip past the parenthesized command name, which may itself
     contain spaces.  */
  p = strchr (content, '(');

  p = strchr (p, ')');

  /* If the first field after program name has index 0, then core number is
     the field with index 36.  There's no constant for that anywhere.  */

  p = strtok_r (p, " ", &ts);
  for (i = 0; p != NULL && i != 36; ++i)
    p = strtok_r (NULL, " ", &ts);

  if (p == NULL || sscanf (p, "%d", &core) == 0)

  do_cleanups (back_to);
/* Return the cached value of the processor core for thread PTID.  */

linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
  struct lwp_info *info = find_lwp_pid (ptid);
/* Register T as the GNU/Linux native target: save T's (single
   threaded) methods in linux_ops for later delegation, then override
   the multithreading-, async- and non-stop-related methods before T
   is pushed.  */
linux_nat_add_target (struct target_ops *t)
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Async support.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */
/* Register a method to call whenever a new thread is attached.  */

linux_nat_set_new_thread (struct target_ops *t,
			  void (*new_thread) (struct lwp_info *))
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (struct siginfo *,
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
/* Register a method to call prior to resuming a thread.  */

linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
  /* Save the pointer.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
/* Return the saved siginfo associated with PTID.  The LWP must
   already be known to this layer.  */
linux_nat_get_siginfo (ptid_t ptid)
  struct lwp_info *lp = find_lwp_pid (ptid);

  gdb_assert (lp != NULL);

  return &lp->siginfo;
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

/* Module initializer: register the "info proc" command family and the
   lin-lwp debug setting, and install the SIGCHLD handler and the
   signal masks this file relies on.  */
_initialize_linux_nat (void)
  static struct cmd_list_element *info_proc_cmdlist;

  add_prefix_cmd ("proc", class_info, linux_nat_info_proc_cmd,
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default."),
		  &info_proc_cmdlist, "info proc ",
		  1/*allow-unknown*/, &infolist);

  add_cmd ("mappings", class_info, linux_nat_info_proc_cmd_mappings, _("\
List of mapped memory regions."),
	   &info_proc_cmdlist);

  add_cmd ("stat", class_info, linux_nat_info_proc_cmd_stat, _("\
List process info from /proc/PID/stat."),
	   &info_proc_cmdlist);

  add_cmd ("status", class_info, linux_nat_info_proc_cmd_status, _("\
List process info from /proc/PID/status."),
	   &info_proc_cmdlist);

  add_cmd ("cwd", class_info, linux_nat_info_proc_cmd_cwd, _("\
List current working directory of the process."),
	   &info_proc_cmdlist);

  add_cmd ("cmdline", class_info, linux_nat_info_proc_cmd_cmdline, _("\
List command line arguments of the process."),
	   &info_proc_cmdlist);

  add_cmd ("exe", class_info, linux_nat_info_proc_cmd_exe, _("\
List absolute filename for executable of the process."),
	   &info_proc_cmdlist);

  add_cmd ("all", class_info, linux_nat_info_proc_cmd_all, _("\
List all available /proc info."),
	   &info_proc_cmdlist);

  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
			    &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			    show_debug_linux_nat,
			    &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

get_signo (const char *name)
  struct minimal_symbol *ms;

  /* Look the variable up in the inferior's symbol table; failure
     paths (elided here) return zero.  */
  ms = lookup_minimal_symbol (name, NULL, NULL);

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
			  sizeof (signo)) != 0)
6034 /* Return the set of signals used by the threads library in *SET. */
6037 lin_thread_get_thread_signals (sigset_t *set)
6039 struct sigaction action;
6040 int restart, cancel;
6042 sigemptyset (&blocked_mask);
6045 restart = get_signo ("__pthread_sig_restart");
6046 cancel = get_signo ("__pthread_sig_cancel");
6048 /* LinuxThreads normally uses the first two RT signals, but in some legacy
6049 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
6050 not provide any way for the debugger to query the signal numbers -
6051 fortunately they don't change! */
6054 restart = __SIGRTMIN;
6057 cancel = __SIGRTMIN + 1;
6059 sigaddset (set, restart);
6060 sigaddset (set, cancel);
6062 /* The GNU/Linux Threads library makes terminating threads send a
6063 special "cancel" signal instead of SIGCHLD. Make sure we catch
6064 those (to prevent them from terminating GDB itself, which is
6065 likely to be their default action) and treat them the same way as
6068 action.sa_handler = sigchld_handler;
6069 sigemptyset (&action.sa_mask);
6070 action.sa_flags = SA_RESTART;
6071 sigaction (cancel, &action, NULL);
6073 /* We block the "cancel" signal throughout this code ... */
6074 sigaddset (&blocked_mask, cancel);
6075 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
6077 /* ... except during a sigsuspend. */
6078 sigdelset (&suspend_mask, cancel);