/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/linux-osdata.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"

#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>

#include "filestuff.h"
#include "tracepoint.h"
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */

#define SPUFS_MAGIC 0x23c9b64e
#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
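/* For reference: W_STOPCODE composes the wait status waitpid reports
   for a signal stop.  E.g., with SIGSTOP == 19 on Linux,
   W_STOPCODE (SIGSTOP) == (19 << 8) | 0x7f == 0x137f, for which
   WIFSTOPPED is true and WSTOPSIG recovers 19.  It is used below to
   synthesize statuses for deferred signals.  */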
/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif
/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif
#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}
/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;
/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	free (*p);
	*p = next;
	return 1;
      }

  return 0;
}
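/* Usage note: these two functions cover the race where a clone
   child's initial stop is reported before the parent's clone event.
   linux_low_filter_event stashes such unknown stops with
   add_to_pid_list, and handle_extended_wait later claims them with
   pull_pid_from_list instead of blocking in waitpid.  */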
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;
static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;
/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}
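/* Note that the stepping range is half-open: with step_range_start
   == 0x1000 and step_range_end == 0x1008, a stop at pc 0x1000 is in
   range, while a stop at 0x1008 is not.  */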
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }

  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return -1;
    }

  close (fd);
  return elf_64_header_p (&header, machine);
}
/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
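/* Example (hypothetical caller): to test gdbserver's own image,

     unsigned int machine;
     int is_64 = linux_pid_exe_is_elf_64_file (getpid (), &machine);

   would yield is_64 == 1 and machine == EM_X86_64 on x86-64.  */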
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}
/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}
static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
    }
}
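/* For reference, an extended clone stop is encoded by the kernel as
   SIGTRAP plus the event number in the upper bits of the wait
   status: wstat >> 8 == (SIGTRAP | (PTRACE_EVENT_CLONE << 8)), i.e.
   0x305 given SIGTRAP == 5 and PTRACE_EVENT_CLONE == 3; that is what
   linux_ptrace_get_extended_event decodes above.  */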
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
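/* Worked example on i386 (decr_pc_after_break == 1): a software
   breakpoint planted at 0x1000 is the one-byte int3 opcode.  When it
   fires under PTRACE_CONT, the kernel reports SIGTRAP with $eip ==
   0x1001; the code below rewinds the PC to 0x1000 so the stop is
   reported at the breakpoint address.  */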
static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back software breakpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back hardware "
				"breakpoint/watchpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
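/* Note on the siginfo path above: for kernel-reported traps, si_code
   distinguishes the cases; TRAP_BRKPT denotes a software breakpoint
   and TRAP_HWBKPT a hardware breakpoint or watchpoint hit
   (GDB_ARCH_TRAP_BRKPT abstracts the arch-specific value).  */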
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}
/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   failure.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
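/* For reference, kill_lwp (defined later in this file) targets a
   single LWP rather than the whole process; on kernels with the
   tkill syscall it boils down to, roughly:

     syscall (__NR_tkill, lwpid, signo);

   with a plain kill () fallback elsewhere.  */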
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }

  return 0;
}
/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  return 0;
}
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}
static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}
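/* Note on the __WCLONE retry above: a plain waitpid only sees
   children whose exit signal is SIGCHLD; an LWP created by raw clone
   with a different (or no) exit signal is only reported when
   __WCLONE is passed, hence the ECHILD fallback.  */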
/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}
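/* The signal number returned above is handed to PTRACE_DETACH as its
   data argument (see linux_detach_one_lwp below), so the kernel
   delivers it as the thread resumes; e.g. an intercepted SIGTERM is
   re-delivered rather than silently dropped on detach.  */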
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}
static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}
/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}
/* Return 1 if this lwp has an interesting status pending.  */

static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}
/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}
/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    debug_printf ("CZL: Thread group leader %d zombie "
			  "(it exited, or another thread execd).\n",
			  leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}
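/* Example of the exec case above: with LWPs 100 (leader), 101 and
   102, if 102 execs, the kernel silently destroys 101 and leaves 100
   a zombie.  Once 101 is reaped, 102 takes over tid 100; deleting
   the stale leader here lets the "new" LWP 100 be re-added when it
   is next seen.  */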
/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}
/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}
/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}
/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
/* Fetch the possibly triggered data watchpoint info and store it in
   CHILD.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *child)
{
  if (the_low_target.stopped_by_watchpoint != NULL)
    {
      struct thread_info *saved_thread;

      saved_thread = current_thread;
      current_thread = get_lwp_thread (child);

      if (the_low_target.stopped_by_watchpoint ())
	{
	  child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;

	  if (the_low_target.stopped_data_address != NULL)
	    child->stopped_data_address
	      = the_low_target.stopped_data_address ();
	  else
	    child->stopped_data_address = 0;
	}

      current_thread = saved_thread;
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
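/* On x86, for example, the low target's stopped_data_address hook
   inspects the DR6 debug status register to tell which debug
   register fired and returns the watched address; caching it here
   keeps the answer valid even if GDB rewrites the debug registers
   before asking.  */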
/* Do low-level handling of the event, and check if we should go on
   and pass it to caller code.  Return the affected lwp if we are, or
   NULL otherwise.  */

static struct lwp_info *
linux_low_filter_event (int lwpid, int wstat)
{
  struct lwp_info *child;
  struct thread_info *thread;
  int have_stop_pc = 0;

  child = find_lwp_pid (pid_to_ptid (lwpid));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (wstat))
    {
      add_to_pid_list (&stopped_pids, lwpid, wstat);
      return NULL;
    }
  else if (child == NULL)
    return NULL;

  thread = get_lwp_thread (child);

  child->stopped = 1;

  child->last_status = wstat;

  /* Check if the thread has exited.  */
  if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
    {
      if (debug_threads)
	debug_printf ("LLFE: %d exited.\n", lwpid);
      if (num_lwps (pid_of (thread)) > 1)
	{
	  /* If there is at least one more LWP, then the exit signal was
	     not the end of the debugged application and should be
	     ignored.  */
	  delete_lwp (child);
	  return NULL;
	}
      else
	{
	  /* This was the last lwp in the process.  Since events are
	     serialized to GDB core, and we can't report this one
	     right now, but GDB core and the other target layers will
	     want to be notified about the exit code/signal, leave the
	     status pending for the next time we're able to report
	     it.  */
	  mark_lwp_dead (child, wstat);
	  return child;
	}
    }

  gdb_assert (WIFSTOPPED (wstat));

  if (WIFSTOPPED (wstat))
    {
      struct process_info *proc;

      /* Architecture-specific setup after inferior is running.  This
	 needs to happen after we have attached to the inferior and it
	 is stopped for the first time, but before we access any
	 inferior registers.  */
      proc = find_process_pid (pid_of (thread));
      if (proc->priv->new_inferior)
	{
	  struct thread_info *saved_thread;

	  saved_thread = current_thread;
	  current_thread = thread;

	  the_low_target.arch_setup ();

	  current_thread = saved_thread;

	  proc->priv->new_inferior = 0;
	}
    }

  if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
    {
      struct process_info *proc = find_process_pid (pid_of (thread));

      linux_enable_event_reporting (lwpid, proc->attached);
      child->must_set_ptrace_flags = 0;
    }

  /* Be careful to not overwrite stop_pc until
     check_stopped_by_breakpoint is called.  */
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
      && linux_is_extended_waitstatus (wstat))
    {
      child->stop_pc = get_pc (child);
      handle_extended_wait (child, wstat);
      return NULL;
    }

  /* Check first whether this was a SW/HW breakpoint before checking
     watchpoints, because at least s390 can't tell the data address of
     hardware watchpoint hits, and returns stopped-by-watchpoint as
     long as there's a watchpoint set.  */
  if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
    {
      if (check_stopped_by_breakpoint (child))
	have_stop_pc = 1;

      /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
	 or hardware watchpoint.  Check which is which if we got
	 TARGET_STOPPED_BY_HW_BREAKPOINT.  */
      if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
	  && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
	      || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
	check_stopped_by_watchpoint (child);
    }

  if (!have_stop_pc)
    child->stop_pc = get_pc (child);

  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
      && child->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Expected stop.\n");
      child->stop_expected = 0;

      if (thread->last_resume_kind == resume_stop)
	{
	  /* We want to report the stop to the core.  Treat the
	     SIGSTOP as a normal event.  */
	}
      else if (stopping_threads != NOT_STOPPING_THREADS)
	{
	  /* Stopping threads.  We don't want this SIGSTOP to end up
	     pending.  */
	  return NULL;
	}
      else
	{
	  /* Filter out the event.  */
	  linux_resume_one_lwp (child, child->stepping, 0, NULL);
	  return NULL;
	}
    }

  child->status_pending_p = 1;
  child->status_pending = wstat;
  return child;
}
/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.  */

static void
resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->stopped
      && !lp->status_pending_p
      && thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    {
      int step = thread->last_resume_kind == resume_step;

      if (debug_threads)
	debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
		      target_pid_to_str (ptid_of (thread)),
		      paddress (lp->stop_pc),
		      step);

      linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
    }
}
2108 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2109 match FILTER_PTID (leaving others pending). The PTIDs can be:
2110 minus_one_ptid, to specify any child; a pid PTID, specifying all
2111 lwps of a thread group; or a PTID representing a single lwp. Store
2112 the stop status through the status pointer WSTAT. OPTIONS is
2113 passed to the waitpid call. Return 0 if no event was found and
2114 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2115 was found. Return the PID of the stopped child otherwise. */
2118 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2119 int *wstatp, int options)
2121 struct thread_info *event_thread;
2122 struct lwp_info *event_child, *requested_child;
2123 sigset_t block_mask, prev_mask;
2126 /* N.B. event_thread points to the thread_info struct that contains
2127 event_child. Keep them in sync. */
2128 event_thread = NULL;
2130 requested_child = NULL;
2132 /* Check for a lwp with a pending status. */
2134 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2136 event_thread = (struct thread_info *)
2137 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2138 if (event_thread != NULL)
2139 event_child = get_thread_lwp (event_thread);
2140 if (debug_threads && event_thread)
2141 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2143 else if (!ptid_equal (filter_ptid, null_ptid))
2145 requested_child = find_lwp_pid (filter_ptid);
2147 if (stopping_threads == NOT_STOPPING_THREADS
2148 && requested_child->status_pending_p
2149 && requested_child->collecting_fast_tracepoint)
2151 enqueue_one_deferred_signal (requested_child,
2152 &requested_child->status_pending);
2153 requested_child->status_pending_p = 0;
2154 requested_child->status_pending = 0;
2155 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2158 if (requested_child->suspended
2159 && requested_child->status_pending_p)
2161 internal_error (__FILE__, __LINE__,
2162 "requesting an event out of a"
2163 " suspended child?");
2166 if (requested_child->status_pending_p)
2168 event_child = requested_child;
2169 event_thread = get_lwp_thread (event_child);
2173 if (event_child != NULL)
2176 debug_printf ("Got an event from pending child %ld (%04x)\n",
2177 lwpid_of (event_thread), event_child->status_pending);
2178 *wstatp = event_child->status_pending;
2179 event_child->status_pending_p = 0;
2180 event_child->status_pending = 0;
2181 current_thread = event_thread;
2182 return lwpid_of (event_thread);
2185 /* But if we don't find a pending event, we'll have to wait.
2187 We only enter this loop if no process has a pending wait status.
2188 Thus any action taken in response to a wait status inside this
2189 loop is responding as soon as we detect the status, not after any
2192 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2193 all signals while here. */
2194 sigfillset (&block_mask);
2195 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2197 /* Always pull all events out of the kernel. We'll randomly select
2198 an event LWP out of all that have events, to prevent
2200 while (event_child == NULL)
2204 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2207 - If the thread group leader exits while other threads in the
2208 thread group still exist, waitpid(TGID, ...) hangs. That
2209 waitpid won't return an exit status until the other threads
2210 in the group are reaped.
2212 - When a non-leader thread execs, that thread just vanishes
2213 without reporting an exit (so we'd hang if we waited for it
2214 explicitly in that case). The exec event is reported to
2215 the TGID pid (although we don't currently enable exec
2218 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2221 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2222 ret, errno ? strerror (errno) : "ERRNO-OK");
2228 debug_printf ("LLW: waitpid %ld received %s\n",
2229 (long) ret, status_to_str (*wstatp));
2232 /* Filter all events. IOW, leave all events pending. We'll
2233 randomly select an event LWP out of all that have events
2235 linux_low_filter_event (ret, *wstatp);
2236 /* Retry until nothing comes out of waitpid. A single
2237 SIGCHLD can indicate more than one child stopped. */
2241 /* Now that we've pulled all events out of the kernel, resume
2242 LWPs that don't have an interesting event to report. */
2243 if (stopping_threads == NOT_STOPPING_THREADS)
2244 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2246 /* ... and find an LWP with a status to report to the core, if
2248 event_thread = (struct thread_info *)
2249 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2250 if (event_thread != NULL)
2252 event_child = get_thread_lwp (event_thread);
2253 *wstatp = event_child->status_pending;
2254 event_child->status_pending_p = 0;
2255 event_child->status_pending = 0;
2259 /* Check for zombie thread group leaders. Those can't be reaped
2260 until all other threads in the thread group are. */
2261 check_zombie_leaders ();
2263 /* If there are no resumed children left in the set of LWPs we
2264 want to wait for, bail. We can't just block in
2265 waitpid/sigsuspend, because lwps might have been left stopped
2266 in trace-stop state, and we'd be stuck forever waiting for
2267 their status to change (which would only happen if we resumed
2268 them). Even if WNOHANG is set, this return code is preferred
2269 over 0 (below), as it is more detailed. */
2270 if ((find_inferior (&all_threads,
2271 not_stopped_callback,
2272 &wait_ptid) == NULL))
2275 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2276 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2277 return -1;
2280 /* No interesting event to report to the caller. */
2281 if ((options & WNOHANG))
2284 debug_printf ("WNOHANG set, no event found\n");
2286 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2287 return 0;
2290 /* Block until we get an event reported with SIGCHLD. */
2292 debug_printf ("sigsuspend'ing\n");
2294 sigsuspend (&prev_mask);
2295 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2299 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2301 current_thread = event_thread;
2303 /* Check for thread exit. */
2304 if (! WIFSTOPPED (*wstatp))
2306 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2309 debug_printf ("LWP %d is the last lwp of process. "
2310 "Process %ld exiting.\n",
2311 pid_of (event_thread), lwpid_of (event_thread));
2312 return lwpid_of (event_thread);
2315 return lwpid_of (event_thread);
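/* Editor's illustrative sketch, not part of the original source: the
   race-free SIGCHLD wait pattern used by the function above, reduced
   to its core.  It assumes a SIGCHLD handler is installed (as
   gdbserver does elsewhere), so that sigsuspend actually returns.  */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
drain_child_events (void)
{
  sigset_t block_mask, prev_mask;
  int status;

  /* Block everything so no SIGCHLD can slip in between the last
     waitpid and the sigsuspend below.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      pid_t pid = waitpid (-1, &status, WNOHANG);

      if (pid > 0)
	continue;	/* Record the event, then keep draining.  */
      if (pid < 0 && errno == ECHILD)
	break;		/* No children left to wait for.  */

      /* Nothing pending right now: sleep until a signal arrives.
	 sigsuspend atomically installs PREV_MASK while sleeping.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
}
#endif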
2318 /* Wait for an event from child(ren) PTID. PTIDs can be:
2319 minus_one_ptid, to specify any child; a pid PTID, specifying all
2320 lwps of a thread group; or a PTID representing a single lwp. Store
2321 the stop status through the status pointer WSTAT. OPTIONS is
2322 passed to the waitpid call. Return 0 if no event was found and
2323 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2324 were found. Return the PID of the stopped child otherwise. */
2327 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2329 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2332 /* Count the LWP's that have had events. */
2335 count_events_callback (struct inferior_list_entry *entry, void *data)
2337 struct thread_info *thread = (struct thread_info *) entry;
2338 struct lwp_info *lp = get_thread_lwp (thread);
2339 int *count = data;
2341 gdb_assert (count != NULL);
2343 /* Count only resumed LWPs that have an event pending. */
2344 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2345 && lp->status_pending_p)
2346 (*count)++;
2348 return 0;
2351 /* Select the LWP (if any) that is currently being single-stepped. */
2354 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2356 struct thread_info *thread = (struct thread_info *) entry;
2357 struct lwp_info *lp = get_thread_lwp (thread);
2359 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2360 && thread->last_resume_kind == resume_step
2361 && lp->status_pending_p)
2362 return 1;
2363 else
2364 return 0;
2367 /* Select the Nth LWP that has had an event. */
2370 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2372 struct thread_info *thread = (struct thread_info *) entry;
2373 struct lwp_info *lp = get_thread_lwp (thread);
2374 int *selector = data;
2376 gdb_assert (selector != NULL);
2378 /* Select only resumed LWPs that have an event pending. */
2379 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2380 && lp->status_pending_p)
2381 if ((*selector)-- == 0)
2382 return 1;
2384 return 0;
2387 /* Select one LWP out of those that have events pending. */
2390 select_event_lwp (struct lwp_info **orig_lp)
2392 int num_events = 0;
2393 int random_selector;
2394 struct thread_info *event_thread = NULL;
2396 /* In all-stop, give preference to the LWP that is being
2397 single-stepped. There will be at most one, and it's the LWP that
2398 the core is most interested in. If we didn't do this, then we'd
2399 have to handle pending step SIGTRAPs somehow in case the core
2400 later continues the previously-stepped thread, otherwise we'd
2401 report the pending SIGTRAP, and the core, not having stepped the
2402 thread, wouldn't understand what the trap was for, and therefore
2403 would report it to the user as a random signal. */
2406 event_thread
2407 = (struct thread_info *) find_inferior (&all_threads,
2408 select_singlestep_lwp_callback,
2409 NULL);
2410 if (event_thread != NULL)
2413 debug_printf ("SEL: Select single-step %s\n",
2414 target_pid_to_str (ptid_of (event_thread)));
2417 if (event_thread == NULL)
2419 /* No single-stepping LWP. Select one at random, out of those
2420 which have had events. */
2422 /* First see how many events we have. */
2423 find_inferior (&all_threads, count_events_callback, &num_events);
2424 gdb_assert (num_events > 0);
2426 /* Now randomly pick a LWP out of those that have had
2427 events. */
2428 random_selector = (int)
2429 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2431 if (debug_threads && num_events > 1)
2432 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2433 num_events, random_selector);
2435 event_thread
2436 = (struct thread_info *) find_inferior (&all_threads,
2437 select_event_lwp_callback,
2438 &random_selector);
2441 if (event_thread != NULL)
2443 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2445 /* Switch the event LWP. */
2446 *orig_lp = event_lp;
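/* Editor's note, not part of the original source: the random_selector
   computation above is the standard idiom for drawing a uniform
   integer in [0, num_events) from rand ().  A minimal sketch:  */
#if 0
#include <stdlib.h>

/* Return a pseudo-random integer in [0, n), for n > 0.  Scaling via
   double avoids the low-value bias of 'rand () % n' whenever
   RAND_MAX + 1 is not an exact multiple of n.  */
static int
pick_uniform (int n)
{
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif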
2450 /* Decrement the suspend count of an LWP. */
2453 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2455 struct thread_info *thread = (struct thread_info *) entry;
2456 struct lwp_info *lwp = get_thread_lwp (thread);
2458 /* Ignore EXCEPT. */
2459 if (lwp == except)
2460 return 0;
2462 lwp->suspended--;
2464 gdb_assert (lwp->suspended >= 0);
2465 return 0;
2468 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2469 non-NULL. */
2472 unsuspend_all_lwps (struct lwp_info *except)
2474 find_inferior (&all_threads, unsuspend_one_lwp, except);
2477 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2478 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2479 void *data);
2480 static int lwp_running (struct inferior_list_entry *entry, void *data);
2481 static ptid_t linux_wait_1 (ptid_t ptid,
2482 struct target_waitstatus *ourstatus,
2483 int target_options);
2485 /* Stabilize threads (move out of jump pads).
2487 If a thread is midway collecting a fast tracepoint, we need to
2488 finish the collection and move it out of the jump pad before
2489 reporting the signal.
2491 This avoids recursion while collecting (when a signal arrives
2492 midway, and the signal handler itself collects), which would trash
2493 the trace buffer. In case the user set a breakpoint in a signal
2494 handler, this avoids the backtrace showing the jump pad, etc..
2495 Most importantly, there are certain things we can't do safely if
2496 threads are stopped in a jump pad (or in its callees'). For
2497 example:
2499 - starting a new trace run. A thread still collecting the
2500 previous run, could trash the trace buffer when resumed. The trace
2501 buffer control structures would have been reset but the thread had
2502 no way to tell. The thread could even be midway through memcpy'ing to the
2503 buffer, which would mean that when resumed, it would clobber the
2504 trace buffer that had been set for a new run.
2506 - we can't rewrite/reuse the jump pads for new tracepoints
2507 safely. Say you do tstart while a thread is stopped midway while
2508 collecting. When the thread is later resumed, it finishes the
2509 collection, and returns to the jump pad, to execute the original
2510 instruction that was under the tracepoint jump at the time the
2511 older run had been started. If the jump pad had been rewritten
2512 since for something else in the new run, the thread would now
2513 execute the wrong / random instructions. */
2516 linux_stabilize_threads (void)
2518 struct thread_info *saved_thread;
2519 struct thread_info *thread_stuck;
2521 thread_stuck
2522 = (struct thread_info *) find_inferior (&all_threads,
2523 stuck_in_jump_pad_callback,
2524 NULL);
2525 if (thread_stuck != NULL)
2528 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2529 lwpid_of (thread_stuck));
2530 return;
2533 saved_thread = current_thread;
2535 stabilizing_threads = 1;
2538 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2540 /* Loop until all are stopped out of the jump pads. */
2541 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2543 struct target_waitstatus ourstatus;
2544 struct lwp_info *lwp;
2545 int wstat;
2547 /* Note that we go through the full wait event loop. While
2548 moving threads out of jump pad, we need to be able to step
2549 over internal breakpoints and such. */
2550 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2552 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2554 lwp = get_thread_lwp (current_thread);
2556 /* Lock it. */
2557 lwp->suspended++;
2559 if (ourstatus.value.sig != GDB_SIGNAL_0
2560 || current_thread->last_resume_kind == resume_stop)
2562 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2563 enqueue_one_deferred_signal (lwp, &wstat);
2568 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2570 stabilizing_threads = 0;
2572 current_thread = saved_thread;
2574 if (debug_threads)
2576 thread_stuck
2577 = (struct thread_info *) find_inferior (&all_threads,
2578 stuck_in_jump_pad_callback,
2579 NULL);
2580 if (thread_stuck != NULL)
2581 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2582 lwpid_of (thread_stuck));
2586 static void async_file_mark (void);
2588 /* Convenience function that is called when the kernel reports an
2589 event that is not passed out to GDB. */
2592 ignore_event (struct target_waitstatus *ourstatus)
2594 /* If we got an event, there may still be others, as a single
2595 SIGCHLD can indicate more than one child stopped. This forces
2596 another target_wait call. */
2599 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2600 return null_ptid;
2603 /* Wait for process, returns status. */
2606 linux_wait_1 (ptid_t ptid,
2607 struct target_waitstatus *ourstatus, int target_options)
2609 int w;
2610 struct lwp_info *event_child;
2611 int options;
2612 int pid;
2613 int step_over_finished;
2614 int bp_explains_trap;
2615 int maybe_internal_trap;
2616 int report_to_gdb;
2617 int trace_event;
2618 int in_step_range;
2623 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2626 /* Translate generic target options into linux options. */
2627 options = __WALL;
2628 if (target_options & TARGET_WNOHANG)
2629 options |= WNOHANG;
2631 bp_explains_trap = 0;
2632 trace_event = 0;
2633 in_step_range = 0;
2634 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2636 if (ptid_equal (step_over_bkpt, null_ptid))
2637 pid = linux_wait_for_event (ptid, &w, options);
2641 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2642 target_pid_to_str (step_over_bkpt));
2643 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2646 if (pid == 0)
2648 gdb_assert (target_options & TARGET_WNOHANG);
2652 debug_printf ("linux_wait_1 ret = null_ptid, "
2653 "TARGET_WAITKIND_IGNORE\n");
2657 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2658 return null_ptid;
2661 else if (pid == -1)
2664 debug_printf ("linux_wait_1 ret = null_ptid, "
2665 "TARGET_WAITKIND_NO_RESUMED\n");
2669 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2670 return null_ptid;
2673 event_child = get_thread_lwp (current_thread);
2675 /* linux_wait_for_event only returns an exit status for the last
2676 child of a process. Report it. */
2677 if (WIFEXITED (w) || WIFSIGNALED (w))
2681 ourstatus->kind = TARGET_WAITKIND_EXITED;
2682 ourstatus->value.integer = WEXITSTATUS (w);
2686 debug_printf ("linux_wait_1 ret = %s, exited with "
2687 "retcode %d\n",
2688 target_pid_to_str (ptid_of (current_thread)),
2689 WEXITSTATUS (w));
2693 else
2695 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2696 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2700 debug_printf ("linux_wait_1 ret = %s, terminated with "
2701 "signal %d\n",
2702 target_pid_to_str (ptid_of (current_thread)),
2703 WTERMSIG (w));
2708 return ptid_of (current_thread);
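/* Editor's illustrative sketch, not part of the original source: the
   wait-status decoding used by the exit/signal paths above, gathered
   in one place for reference.  */
#if 0
#include <stdio.h>
#include <sys/wait.h>

static void
describe_wait_status (int w)
{
  if (WIFEXITED (w))
    printf ("exited normally, status %d\n", WEXITSTATUS (w));
  else if (WIFSIGNALED (w))
    printf ("killed by signal %d\n", WTERMSIG (w));
  else if (WIFSTOPPED (w))
    printf ("stopped by signal %d\n", WSTOPSIG (w));
}
#endif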
2711 /* If step-over executes a breakpoint instruction, it means a
2712 gdb/gdbserver breakpoint had been planted on top of a permanent
2713 breakpoint. The PC has been adjusted by
2714 check_stopped_by_breakpoint to point at the breakpoint address.
2715 Advance the PC manually past the breakpoint, otherwise the
2716 program would keep trapping the permanent breakpoint forever. */
2717 if (!ptid_equal (step_over_bkpt, null_ptid)
2718 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2720 unsigned int increment_pc = the_low_target.breakpoint_len;
2724 debug_printf ("step-over for %s executed software breakpoint\n",
2725 target_pid_to_str (ptid_of (current_thread)));
2728 if (increment_pc != 0)
2730 struct regcache *regcache
2731 = get_thread_regcache (current_thread, 1);
2733 event_child->stop_pc += increment_pc;
2734 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2736 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2737 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2741 /* If this event was not handled before, and is not a SIGTRAP, we
2742 report it. SIGILL and SIGSEGV are also treated as traps in case
2743 a breakpoint is inserted at the current PC. If this target does
2744 not support internal breakpoints at all, we also report the
2745 SIGTRAP without further processing; it's of no concern to us. */
2746 maybe_internal_trap
2747 = (supports_breakpoints ()
2748 && (WSTOPSIG (w) == SIGTRAP
2749 || ((WSTOPSIG (w) == SIGILL
2750 || WSTOPSIG (w) == SIGSEGV)
2751 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2753 if (maybe_internal_trap)
2755 /* Handle anything that requires bookkeeping before deciding to
2756 report the event or continue waiting. */
2758 /* First check if we can explain the SIGTRAP with an internal
2759 breakpoint, or if we should possibly report the event to GDB.
2760 Do this before anything that may remove or insert a
2761 breakpoint. */
2762 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2764 /* We have a SIGTRAP, possibly a step-over dance has just
2765 finished. If so, tweak the state machine accordingly,
2766 reinsert breakpoints and delete any reinsert (software
2767 single-step) breakpoints. */
2768 step_over_finished = finish_step_over (event_child);
2770 /* Now invoke the callbacks of any internal breakpoints there. */
2771 check_breakpoints (event_child->stop_pc);
2773 /* Handle tracepoint data collecting. This may overflow the
2774 trace buffer, and cause a tracing stop, removing
2775 breakpoints. */
2776 trace_event = handle_tracepoints (event_child);
2778 if (bp_explains_trap)
2780 /* If we stepped or ran into an internal breakpoint, we've
2781 already handled it. So next time we resume (from this
2782 PC), we should step over it. */
2784 debug_printf ("Hit a gdbserver breakpoint.\n");
2786 if (breakpoint_here (event_child->stop_pc))
2787 event_child->need_step_over = 1;
2792 /* We have some other signal, possibly a step-over dance was in
2793 progress, and it should be cancelled too. */
2794 step_over_finished = finish_step_over (event_child);
2797 /* We have all the data we need. Either report the event to GDB, or
2798 resume threads and keep waiting for more. */
2800 /* If we're collecting a fast tracepoint, finish the collection and
2801 move out of the jump pad before delivering a signal. See
2802 linux_stabilize_threads. */
2804 if (WIFSTOPPED (w)
2805 && WSTOPSIG (w) != SIGTRAP
2806 && supports_fast_tracepoints ()
2807 && agent_loaded_p ())
2810 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2811 "to defer or adjust it.\n",
2812 WSTOPSIG (w), lwpid_of (current_thread));
2814 /* Allow debugging the jump pad itself. */
2815 if (current_thread->last_resume_kind != resume_step
2816 && maybe_move_out_of_jump_pad (event_child, &w))
2818 enqueue_one_deferred_signal (event_child, &w);
2821 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2822 WSTOPSIG (w), lwpid_of (current_thread));
2824 linux_resume_one_lwp (event_child, 0, 0, NULL);
2826 return ignore_event (ourstatus);
2830 if (event_child->collecting_fast_tracepoint)
2833 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2834 "Check if we're already there.\n",
2835 lwpid_of (current_thread),
2836 event_child->collecting_fast_tracepoint);
2840 event_child->collecting_fast_tracepoint
2841 = linux_fast_tracepoint_collecting (event_child, NULL);
2843 if (event_child->collecting_fast_tracepoint != 1)
2845 /* No longer need this breakpoint. */
2846 if (event_child->exit_jump_pad_bkpt != NULL)
2849 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2850 "stopping all threads momentarily.\n");
2852 /* Other running threads could hit this breakpoint.
2853 We don't handle moribund locations like GDB does,
2854 instead we always pause all threads when removing
2855 breakpoints, so that any step-over or
2856 decr_pc_after_break adjustment is always taken
2857 care of while the breakpoint is still
2858 inserted. */
2859 stop_all_lwps (1, event_child);
2861 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2862 event_child->exit_jump_pad_bkpt = NULL;
2864 unstop_all_lwps (1, event_child);
2866 gdb_assert (event_child->suspended >= 0);
2870 if (event_child->collecting_fast_tracepoint == 0)
2873 debug_printf ("fast tracepoint finished "
2874 "collecting successfully.\n");
2876 /* We may have a deferred signal to report. */
2877 if (dequeue_one_deferred_signal (event_child, &w))
2880 debug_printf ("dequeued one signal.\n");
2882 else
2885 debug_printf ("no deferred signals.\n");
2887 if (stabilizing_threads)
2889 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2890 ourstatus->value.sig = GDB_SIGNAL_0;
2894 debug_printf ("linux_wait_1 ret = %s, stopped "
2895 "while stabilizing threads\n",
2896 target_pid_to_str (ptid_of (current_thread)));
2900 return ptid_of (current_thread);
2906 /* Check whether GDB would be interested in this event. */
2908 /* If GDB is not interested in this signal, don't stop other
2909 threads, and don't report it to GDB. Just resume the inferior
2910 right away. We do this for threading-related signals as well as
2911 any that GDB specifically requested we ignore. But never ignore
2912 SIGSTOP if we sent it ourselves, and do not ignore signals when
2913 stepping - they may require special handling to skip the signal
2914 handler. Also never ignore signals that could be caused by a
2915 breakpoint. */
2916 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2917 thread library? */
2918 if (WIFSTOPPED (w)
2919 && current_thread->last_resume_kind != resume_step
2920 && (
2921 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2922 (current_process ()->priv->thread_db != NULL
2923 && (WSTOPSIG (w) == __SIGRTMIN
2924 || WSTOPSIG (w) == __SIGRTMIN + 1))
2925 ||
2926 #endif
2927 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2928 && !(WSTOPSIG (w) == SIGSTOP
2929 && current_thread->last_resume_kind == resume_stop)
2930 && !linux_wstatus_maybe_breakpoint (w))))
2932 siginfo_t info, *info_p;
2935 debug_printf ("Ignored signal %d for LWP %ld.\n",
2936 WSTOPSIG (w), lwpid_of (current_thread));
2938 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2939 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2940 info_p = &info;
2941 else
2942 info_p = NULL;
2943 linux_resume_one_lwp (event_child, event_child->stepping,
2944 WSTOPSIG (w), info_p);
2945 return ignore_event (ourstatus);
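/* Editor's illustrative sketch, not part of the original source:
   passing an uninteresting signal straight back to the tracee, as
   done above via linux_resume_one_lwp, is at bottom a PTRACE_CONT
   with a nonzero signal argument.  Helper name is hypothetical.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void
pass_signal_through (pid_t tid, int signo)
{
  siginfo_t info;

  /* Fetching the siginfo first (best effort) mirrors the code above,
     which hands it to the resume path so delivery is faithful.  */
  ptrace (PTRACE_GETSIGINFO, tid, (void *) 0, &info);
  ptrace (PTRACE_CONT, tid, (void *) 0, (void *) (long) signo);
}
#endif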
2948 /* Note that all addresses are always "out of the step range" when
2949 there's no range to begin with. */
2950 in_step_range = lwp_in_step_range (event_child);
2952 /* If GDB wanted this thread to single step, and the thread is out
2953 of the step range, we always want to report the SIGTRAP, and let
2954 GDB handle it. Watchpoints should always be reported. So should
2955 signals we can't explain. A SIGTRAP we can't explain could be a
2956 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2957 do, we're able to handle GDB breakpoints on top of internal
2958 breakpoints, by handling the internal breakpoint and still
2959 reporting the event to GDB. If we don't, we're out of luck, GDB
2960 won't see the breakpoint hit. */
2961 report_to_gdb = (!maybe_internal_trap
2962 || (current_thread->last_resume_kind == resume_step
2963 && !in_step_range)
2964 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
2965 || (!step_over_finished && !in_step_range
2966 && !bp_explains_trap && !trace_event)
2967 || (gdb_breakpoint_here (event_child->stop_pc)
2968 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2969 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2971 run_breakpoint_commands (event_child->stop_pc);
2973 /* We found no reason GDB would want us to stop. We either hit one
2974 of our own breakpoints, or finished an internal step GDB
2975 shouldn't know about. */
2976 if (!report_to_gdb)
2978 if (debug_threads)
2980 if (bp_explains_trap)
2981 debug_printf ("Hit a gdbserver breakpoint.\n");
2982 if (step_over_finished)
2983 debug_printf ("Step-over finished.\n");
2984 if (trace_event)
2985 debug_printf ("Tracepoint event.\n");
2986 if (lwp_in_step_range (event_child))
2987 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2988 paddress (event_child->stop_pc),
2989 paddress (event_child->step_range_start),
2990 paddress (event_child->step_range_end));
2993 /* We're not reporting this breakpoint to GDB, so apply the
2994 decr_pc_after_break adjustment to the inferior's regcache
2995 ourselves. */
2997 if (the_low_target.set_pc != NULL)
2999 struct regcache *regcache
3000 = get_thread_regcache (current_thread, 1);
3001 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3004 /* We may have finished stepping over a breakpoint. If so,
3005 we've stopped and suspended all LWPs momentarily except the
3006 stepping one. This is where we resume them all again. We're
3007 going to keep waiting, so use proceed, which handles stepping
3008 over the next breakpoint. */
3010 debug_printf ("proceeding all threads.\n");
3012 if (step_over_finished)
3013 unsuspend_all_lwps (event_child);
3015 proceed_all_lwps ();
3016 return ignore_event (ourstatus);
3021 if (current_thread->last_resume_kind == resume_step)
3023 if (event_child->step_range_start == event_child->step_range_end)
3024 debug_printf ("GDB wanted to single-step, reporting event.\n");
3025 else if (!lwp_in_step_range (event_child))
3026 debug_printf ("Out of step range, reporting event.\n");
3028 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3029 debug_printf ("Stopped by watchpoint.\n");
3030 else if (gdb_breakpoint_here (event_child->stop_pc))
3031 debug_printf ("Stopped by GDB breakpoint.\n");
3033 debug_printf ("Hit a non-gdbserver trap event.\n");
3036 /* Alright, we're going to report a stop. */
3038 if (!stabilizing_threads)
3040 /* In all-stop, stop all threads. */
3041 if (!non_stop)
3042 stop_all_lwps (0, NULL);
3044 /* If we're not waiting for a specific LWP, choose an event LWP
3045 from among those that have had events. Giving equal priority
3046 to all LWPs that have had events helps prevent
3047 starvation. */
3048 if (ptid_equal (ptid, minus_one_ptid))
3050 event_child->status_pending_p = 1;
3051 event_child->status_pending = w;
3053 select_event_lwp (&event_child);
3055 /* current_thread and event_child must stay in sync. */
3056 current_thread = get_lwp_thread (event_child);
3058 event_child->status_pending_p = 0;
3059 w = event_child->status_pending;
3062 if (step_over_finished)
3066 /* If we were doing a step-over, all other threads but
3067 the stepping one had been paused in start_step_over,
3068 with their suspend counts incremented. We don't want
3069 to do a full unstop/unpause, because we're in
3070 all-stop mode (so we want threads stopped), but we
3071 still need to unsuspend the other threads, to
3072 decrement their `suspended' count back. */
3073 unsuspend_all_lwps (event_child);
3077 /* If we just finished a step-over, then all threads had
3078 been momentarily paused. In all-stop, that's fine,
3079 we want threads stopped by now anyway. In non-stop,
3080 we need to re-resume threads that GDB wanted to be
3082 unstop_all_lwps (1, event_child);
3086 /* Stabilize threads (move out of jump pads). */
3087 if (!non_stop)
3088 stabilize_threads ();
3090 else
3092 /* If we just finished a step-over, then all threads had been
3093 momentarily paused. In all-stop, that's fine, we want
3094 threads stopped by now anyway. In non-stop, we need to
3095 re-resume threads that GDB wanted to be running. */
3096 if (step_over_finished)
3097 unstop_all_lwps (1, event_child);
3100 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3102 /* Now that we've selected our final event LWP, un-adjust its PC if
3103 it was a software breakpoint, and the client doesn't know we can
3104 adjust the breakpoint ourselves. */
3105 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3106 && !swbreak_feature)
3108 int decr_pc = the_low_target.decr_pc_after_break;
3110 if (decr_pc != 0)
3112 struct regcache *regcache
3113 = get_thread_regcache (current_thread, 1);
3114 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3118 if (current_thread->last_resume_kind == resume_stop
3119 && WSTOPSIG (w) == SIGSTOP)
3121 /* A thread that has been requested to stop by GDB with vCont;t,
3122 and it stopped cleanly, so report as SIG0. The use of
3123 SIGSTOP is an implementation detail. */
3124 ourstatus->value.sig = GDB_SIGNAL_0;
3126 else if (current_thread->last_resume_kind == resume_stop
3127 && WSTOPSIG (w) != SIGSTOP)
3129 /* A thread that has been requested to stop by GDB with vCont;t,
3130 but it stopped for other reasons. */
3131 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3135 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3138 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3142 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3143 target_pid_to_str (ptid_of (current_thread)),
3144 ourstatus->kind, ourstatus->value.sig);
3148 return ptid_of (current_thread);
3151 /* Get rid of any pending event in the pipe. */
3153 async_file_flush (void)
3155 int ret;
3156 char buf;
3158 do
3159 ret = read (linux_event_pipe[0], &buf, 1);
3160 while (ret >= 0 || (ret == -1 && errno == EINTR));
3163 /* Put something in the pipe, so the event loop wakes up. */
3165 async_file_mark (void)
3169 async_file_flush ();
3171 do
3172 ret = write (linux_event_pipe[1], "+", 1);
3173 while (ret == 0 || (ret == -1 && errno == EINTR));
3175 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3176 be awakened anyway. */
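/* Editor's illustrative sketch, not part of the original source:
   async_file_flush/async_file_mark above implement the classic
   "self-pipe trick".  For it to be safe, both pipe ends should be
   non-blocking, e.g. created like this (setup name hypothetical):  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int linux_event_pipe[2];	/* Defined elsewhere in this file.  */

static int
create_event_pipe (void)
{
  if (pipe (linux_event_pipe) != 0)
    return -1;

  /* Non-blocking writer: marking never stalls, even if the pipe is
     full.  Non-blocking reader: flushing can drain until EAGAIN.  */
  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

  /* The read end is then registered with the event loop's select/poll
     set; async_file_mark makes it readable to wake the loop up.  */
  return 0;
}
#endif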
3180 linux_wait (ptid_t ptid,
3181 struct target_waitstatus *ourstatus, int target_options)
3185 /* Flush the async file first. */
3186 if (target_is_async_p ())
3187 async_file_flush ();
3189 do
3191 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3193 while ((target_options & TARGET_WNOHANG) == 0
3194 && ptid_equal (event_ptid, null_ptid)
3195 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3197 /* If at least one stop was reported, there may be more. A single
3198 SIGCHLD can signal more than one child stop. */
3199 if (target_is_async_p ()
3200 && (target_options & TARGET_WNOHANG) != 0
3201 && !ptid_equal (event_ptid, null_ptid))
3202 async_file_mark ();
3204 return event_ptid;
3207 /* Send a signal to an LWP. */
3210 kill_lwp (unsigned long lwpid, int signo)
3212 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3213 fails, then we are not using nptl threads and we should be using kill. */
3215 #ifdef __NR_tkill
3217 static int tkill_failed;
3219 if (!tkill_failed)
3221 int ret;
3223 errno = 0;
3224 ret = syscall (__NR_tkill, lwpid, signo);
3225 if (errno != ENOSYS)
3226 return ret;
3227 tkill_failed = 1;
3230 #endif
3232 return kill (lwpid, signo);
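/* Editor's note, not part of the original source: on kernels newer
   than the tkill fallback above targets, tgkill is the safer variant,
   since it also checks the thread-group id and therefore cannot hit a
   recycled tid belonging to an unrelated process:  */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
kill_one_thread (pid_t tgid, pid_t tid, int signo)
{
  /* tgkill is available since Linux 2.5.75; glibc exposes it only
     through syscall (2).  */
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif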
3236 linux_stop_lwp (struct lwp_info *lwp)
3238 send_sigstop (lwp);
3242 send_sigstop (struct lwp_info *lwp)
3246 pid = lwpid_of (get_lwp_thread (lwp));
3248 /* If we already have a pending stop signal for this process, don't
3249 send another. */
3250 if (lwp->stop_expected)
3253 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3255 return;
3259 debug_printf ("Sending sigstop to lwp %d\n", pid);
3261 lwp->stop_expected = 1;
3262 kill_lwp (pid, SIGSTOP);
3266 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3268 struct thread_info *thread = (struct thread_info *) entry;
3269 struct lwp_info *lwp = get_thread_lwp (thread);
3271 /* Ignore EXCEPT. */
3272 if (lwp == except)
3273 return 0;
3275 if (lwp->stopped)
3276 return 0;
3278 send_sigstop (lwp);
3279 return 0;
3282 /* Increment the suspend count of an LWP, and stop it, if not stopped
3283 already. */
3285 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3288 struct thread_info *thread = (struct thread_info *) entry;
3289 struct lwp_info *lwp = get_thread_lwp (thread);
3291 /* Ignore EXCEPT. */
3292 if (lwp == except)
3293 return 0;
3295 lwp->suspended++;
3297 return send_sigstop_callback (entry, except);
3301 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3303 /* It's dead, really. */
3304 lwp->dead = 1;
3306 /* Store the exit status for later. */
3307 lwp->status_pending_p = 1;
3308 lwp->status_pending = wstat;
3310 /* Prevent trying to stop it. */
3311 lwp->stopped = 1;
3313 /* No further stops are expected from a dead lwp. */
3314 lwp->stop_expected = 0;
3317 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3320 wait_for_sigstop (void)
3322 struct thread_info *saved_thread;
3323 ptid_t saved_tid;
3324 int wstat;
3325 int ret;
3327 saved_thread = current_thread;
3328 if (saved_thread != NULL)
3329 saved_tid = saved_thread->entry.id;
3331 saved_tid = null_ptid; /* avoid bogus unused warning */
3334 debug_printf ("wait_for_sigstop: pulling events\n");
3336 /* Passing NULL_PTID as filter indicates we want all events to be
3337 left pending. Eventually this returns when there are no
3338 unwaited-for children left. */
3339 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3340 &wstat, __WALL);
3341 gdb_assert (ret == -1);
3343 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3344 current_thread = saved_thread;
3348 debug_printf ("Previously current thread died.\n");
3350 if (non_stop)
3352 /* We can't change the current inferior behind GDB's back,
3353 otherwise, a subsequent command may apply to the wrong
3355 current_thread = NULL;
3357 else
3359 /* Set a valid thread as current. */
3360 set_desired_thread (0);
3365 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3366 move it out, because we need to report the stop event to GDB. For
3367 example, if the user puts a breakpoint in the jump pad, it's
3368 because she wants to debug it. */
3371 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3373 struct thread_info *thread = (struct thread_info *) entry;
3374 struct lwp_info *lwp = get_thread_lwp (thread);
3376 gdb_assert (lwp->suspended == 0);
3377 gdb_assert (lwp->stopped);
3379 /* Allow debugging the jump pad, gdb_collect, etc.. */
3380 return (supports_fast_tracepoints ()
3381 && agent_loaded_p ()
3382 && (gdb_breakpoint_here (lwp->stop_pc)
3383 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3384 || thread->last_resume_kind == resume_step)
3385 && linux_fast_tracepoint_collecting (lwp, NULL));
3389 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3391 struct thread_info *thread = (struct thread_info *) entry;
3392 struct lwp_info *lwp = get_thread_lwp (thread);
3393 int *wstat;
3395 gdb_assert (lwp->suspended == 0);
3396 gdb_assert (lwp->stopped);
3398 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3400 /* Allow debugging the jump pad, gdb_collect, etc. */
3401 if (!gdb_breakpoint_here (lwp->stop_pc)
3402 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3403 && thread->last_resume_kind != resume_step
3404 && maybe_move_out_of_jump_pad (lwp, wstat))
3407 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3408 lwpid_of (thread));
3410 if (wstat)
3412 lwp->status_pending_p = 0;
3413 enqueue_one_deferred_signal (lwp, wstat);
3416 debug_printf ("Signal %d for LWP %ld deferred "
3417 "(in jump pad)\n",
3418 WSTOPSIG (*wstat), lwpid_of (thread));
3421 linux_resume_one_lwp (lwp, 0, 0, NULL);
3424 else
3425 lwp->suspended++;
3428 lwp_running (struct inferior_list_entry *entry, void *data)
3430 struct thread_info *thread = (struct thread_info *) entry;
3431 struct lwp_info *lwp = get_thread_lwp (thread);
3433 if (lwp->dead)
3434 return 0;
3435 if (lwp->stopped)
3436 return 0;
3437 return 1;
3440 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3441 If SUSPEND, then also increase the suspend count of every LWP,
3442 except EXCEPT. */
3445 stop_all_lwps (int suspend, struct lwp_info *except)
3447 /* Should not be called recursively. */
3448 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3453 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3454 suspend ? "stop-and-suspend" : "stop",
3455 except != NULL
3456 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3457 : "none");
3460 stopping_threads = (suspend
3461 ? STOPPING_AND_SUSPENDING_THREADS
3462 : STOPPING_THREADS);
3464 if (suspend)
3465 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3466 else
3467 find_inferior (&all_threads, send_sigstop_callback, except);
3468 wait_for_sigstop ();
3469 stopping_threads = NOT_STOPPING_THREADS;
3473 debug_printf ("stop_all_lwps done, setting stopping_threads "
3474 "back to !stopping\n");
3479 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3480 SIGNAL is nonzero, give it that signal. */
3483 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3484 int step, int signal, siginfo_t *info)
3486 struct thread_info *thread = get_lwp_thread (lwp);
3487 struct thread_info *saved_thread;
3488 int fast_tp_collecting;
3490 if (lwp->stopped == 0)
3491 return;
3493 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3495 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3497 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3498 user used the "jump" command, or "set $pc = foo"). */
3499 if (lwp->stop_pc != get_pc (lwp))
3501 /* Collecting 'while-stepping' actions doesn't make sense
3502 anymore. */
3503 release_while_stepping_state_list (thread);
3506 /* If we have pending signals or status, and a new signal, enqueue the
3507 signal. Also enqueue the signal if we are waiting to reinsert a
3508 breakpoint; it will be picked up again below. */
3509 if (signal != 0
3510 && (lwp->status_pending_p
3511 || lwp->pending_signals != NULL
3512 || lwp->bp_reinsert != 0
3513 || fast_tp_collecting))
3515 struct pending_signals *p_sig;
3516 p_sig = xmalloc (sizeof (*p_sig));
3517 p_sig->prev = lwp->pending_signals;
3518 p_sig->signal = signal;
3519 if (info == NULL)
3520 memset (&p_sig->info, 0, sizeof (siginfo_t));
3521 else
3522 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3523 lwp->pending_signals = p_sig;
3526 if (lwp->status_pending_p)
3529 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3530 " has pending status\n",
3531 lwpid_of (thread), step ? "step" : "continue", signal,
3532 lwp->stop_expected ? "expected" : "not expected");
3533 return;
3536 saved_thread = current_thread;
3537 current_thread = thread;
3540 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3541 lwpid_of (thread), step ? "step" : "continue", signal,
3542 lwp->stop_expected ? "expected" : "not expected");
3544 /* This bit needs some thinking about. If we get a signal that
3545 we must report while a single-step reinsert is still pending,
3546 we often end up resuming the thread. It might be better to
3547 (ew) allow a stack of pending events; then we could be sure that
3548 the reinsert happened right away and not lose any signals.
3550 Making this stack would also shrink the window in which breakpoints are
3551 uninserted (see comment in linux_wait_for_lwp) but not enough for
3552 complete correctness, so it won't solve that problem. It may be
3553 worthwhile just to solve this one, however. */
3554 if (lwp->bp_reinsert != 0)
3557 debug_printf (" pending reinsert at 0x%s\n",
3558 paddress (lwp->bp_reinsert));
3560 if (can_hardware_single_step ())
3562 if (fast_tp_collecting == 0)
3564 if (step == 0)
3565 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3566 if (lwp->suspended)
3567 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3568 lwp->suspended);
3571 step = 1;
3574 /* Postpone any pending signal. It was enqueued above. */
3578 if (fast_tp_collecting == 1)
3581 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3582 " (exit-jump-pad-bkpt)\n",
3585 /* Postpone any pending signal. It was enqueued above. */
3588 else if (fast_tp_collecting == 2)
3591 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3592 " single-stepping\n",
3595 if (can_hardware_single_step ())
3599 internal_error (__FILE__, __LINE__,
3600 "moving out of jump pad single-stepping"
3601 " not implemented on this target");
3604 /* Postpone any pending signal. It was enqueued above. */
3605 signal = 0;
3608 /* If we have while-stepping actions in this thread set it stepping.
3609 If we have a signal to deliver, it may or may not be set to
3610 SIG_IGN, we don't know. Assume so, and allow collecting
3611 while-stepping into a signal handler. A possible smart thing to
3612 do would be to set an internal breakpoint at the signal return
3613 address, continue, and carry on catching this while-stepping
3614 action only when that breakpoint is hit. A future
3615 enhancement. */
3616 if (thread->while_stepping != NULL
3617 && can_hardware_single_step ())
3620 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3621 lwpid_of (thread));
3622 step = 1;
3625 if (the_low_target.get_pc != NULL)
3627 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3629 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3633 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3634 (long) lwp->stop_pc);
3638 /* If we have pending signals, consume one unless we are trying to
3639 reinsert a breakpoint or we're trying to finish a fast tracepoint
3640 collect. */
3641 if (lwp->pending_signals != NULL
3642 && lwp->bp_reinsert == 0
3643 && fast_tp_collecting == 0)
3645 struct pending_signals **p_sig;
3647 p_sig = &lwp->pending_signals;
3648 while ((*p_sig)->prev != NULL)
3649 p_sig = &(*p_sig)->prev;
3651 signal = (*p_sig)->signal;
3652 if ((*p_sig)->info.si_signo != 0)
3653 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3654 &(*p_sig)->info);
3656 free (*p_sig);
3657 *p_sig = NULL;
3660 if (the_low_target.prepare_to_resume != NULL)
3661 the_low_target.prepare_to_resume (lwp);
3663 regcache_invalidate_thread (thread);
3664 errno = 0;
3665 lwp->stepping = step;
3666 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3667 (PTRACE_TYPE_ARG3) 0,
3668 /* Coerce to a uintptr_t first to avoid potential gcc warning
3669 of coercing an 8 byte integer to a 4 byte pointer. */
3670 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3672 current_thread = saved_thread;
3673 if (errno)
3674 perror_with_name ("resuming thread");
3676 /* Successfully resumed. Clear state that no longer makes sense,
3677 and mark the LWP as running. Must not do this before resuming
3678 otherwise if that fails other code will be confused. E.g., we'd
3679 later try to stop the LWP and hang forever waiting for a stop
3680 status. Note that we must not throw after this is cleared,
3681 otherwise handle_zombie_lwp_error would get confused. */
3682 lwp->stopped = 0;
3683 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
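/* Editor's illustrative sketch, not part of the original source: the
   core of the resume operation above is one ptrace call on a tracee
   currently in ptrace-stop, optionally delivering a signal:  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static long
resume_tracee (pid_t tid, int step, int signo)
{
  /* SIGNO == 0 resumes without delivering any signal.  Fails with
     ESRCH if TID is not ptrace-stopped (or has vanished), which is
     the case check_ptrace_stopped_lwp_gone below distinguishes.  */
  return ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT,
		 tid, (void *) 0, (void *) (long) signo);
}
#endif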
3686 /* Called when we try to resume a stopped LWP and that errors out. If
3687 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3688 or about to become), discard the error, clear any pending status
3689 the LWP may have, and return true (we'll collect the exit status
3690 soon enough). Otherwise, return false. */
3693 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3695 struct thread_info *thread = get_lwp_thread (lp);
3697 /* If we get an error after resuming the LWP successfully, we'd
3698 confuse !T state for the LWP being gone. */
3699 gdb_assert (lp->stopped);
3701 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3702 because even if ptrace failed with ESRCH, the tracee may be "not
3703 yet fully dead", but already refusing ptrace requests. In that
3704 case the tracee has 'R (Running)' state for a little bit
3705 (observed in Linux 3.18). See also the note on ESRCH in the
3706 ptrace(2) man page. Instead, check whether the LWP has any state
3707 other than ptrace-stopped. */
3709 /* Don't assume anything if /proc/PID/status can't be read. */
3710 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3712 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3713 lp->status_pending_p = 0;
3714 return 1;
3717 return 0;
3719 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3720 disappears while we try to resume it. */
3723 linux_resume_one_lwp (struct lwp_info *lwp,
3724 int step, int signal, siginfo_t *info)
3726 TRY
3728 linux_resume_one_lwp_throw (lwp, step, signal, info);
3730 CATCH (ex, RETURN_MASK_ERROR)
3732 if (!check_ptrace_stopped_lwp_gone (lwp))
3733 throw_exception (ex);
3735 END_CATCH
3738 struct thread_resume_array
3740 struct thread_resume *resume;
3741 size_t n;
3742 };
3744 /* This function is called once per thread via find_inferior.
3745 ARG is a pointer to a thread_resume_array struct.
3746 We look up the thread specified by ENTRY in ARG, and mark the thread
3747 with a pointer to the appropriate resume request.
3749 This algorithm is O(threads * resume elements), but resume elements
3750 is small (and will remain small at least until GDB supports thread
3751 suspension). */
3754 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3756 struct thread_info *thread = (struct thread_info *) entry;
3757 struct lwp_info *lwp = get_thread_lwp (thread);
3758 int ndx;
3759 struct thread_resume_array *r;
3761 r = arg;
3763 for (ndx = 0; ndx < r->n; ndx++)
3765 ptid_t ptid = r->resume[ndx].thread;
3766 if (ptid_equal (ptid, minus_one_ptid)
3767 || ptid_equal (ptid, entry->id)
3768 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3769 of PID'. */
3770 || (ptid_get_pid (ptid) == pid_of (thread)
3771 && (ptid_is_pid (ptid)
3772 || ptid_get_lwp (ptid) == -1)))
3774 if (r->resume[ndx].kind == resume_stop
3775 && thread->last_resume_kind == resume_stop)
3778 debug_printf ("already %s LWP %ld at GDB's request\n",
3779 (thread->last_status.kind
3780 == TARGET_WAITKIND_STOPPED)
3781 ? "stopped"
3782 : "stopping",
3783 lwpid_of (thread));
3785 continue;
3788 lwp->resume = &r->resume[ndx];
3789 thread->last_resume_kind = lwp->resume->kind;
3791 lwp->step_range_start = lwp->resume->step_range_start;
3792 lwp->step_range_end = lwp->resume->step_range_end;
3794 /* If we had a deferred signal to report, dequeue one now.
3795 This can happen if LWP gets more than one signal while
3796 trying to get out of a jump pad. */
3797 if (lwp->stopped
3798 && !lwp->status_pending_p
3799 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3801 lwp->status_pending_p = 1;
3804 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3805 "leaving status pending.\n",
3806 WSTOPSIG (lwp->status_pending),
3807 lwpid_of (thread));
3810 return 0;
3814 /* No resume action for this thread. */
3815 lwp->resume = NULL;
3817 return 0;
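/* Editor's illustrative sketch, not part of the original source: the
   ptid matching in the loop above accepts three request forms.
   Reduced to a standalone predicate over gdbserver's ptid API:  */
#if 0
static int
resume_request_matches (ptid_t request, ptid_t thread_ptid)
{
  return (ptid_equal (request, minus_one_ptid)	      /* all threads */
	  || ptid_equal (request, thread_ptid)	      /* one exact lwp */
	  || (ptid_get_pid (request) == ptid_get_pid (thread_ptid)
	      && (ptid_is_pid (request)		      /* 'pPID' */
		  || ptid_get_lwp (request) == -1))); /* 'pPID.-1' */
}
#endif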
3820 /* find_inferior callback for linux_resume.
3821 Set *FLAG_P if this lwp has an interesting status pending. */
3824 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3826 struct thread_info *thread = (struct thread_info *) entry;
3827 struct lwp_info *lwp = get_thread_lwp (thread);
3829 /* LWPs which will not be resumed are not interesting, because
3830 we might not wait for them next time through linux_wait. */
3831 if (lwp->resume == NULL)
3832 return 0;
3834 if (thread_still_has_status_pending_p (thread))
3835 * (int *) flag_p = 1;
3837 return 0;
3840 /* Return 1 if this lwp that GDB wants running is stopped at an
3841 internal breakpoint that we need to step over. It assumes that any
3842 required STOP_PC adjustment has already been propagated to the
3843 inferior's regcache. */
3846 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3848 struct thread_info *thread = (struct thread_info *) entry;
3849 struct lwp_info *lwp = get_thread_lwp (thread);
3850 struct thread_info *saved_thread;
3851 CORE_ADDR pc;
3853 /* LWPs which will not be resumed are not interesting, because we
3854 might not wait for them next time through linux_wait. */
3856 if (!lwp->stopped)
3859 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3860 lwpid_of (thread));
3861 return 0;
3864 if (thread->last_resume_kind == resume_stop)
3867 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3868 " stopped\n",
3869 lwpid_of (thread));
3870 return 0;
3873 gdb_assert (lwp->suspended >= 0);
3875 if (lwp->suspended)
3878 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3879 lwpid_of (thread));
3880 return 0;
3883 if (!lwp->need_step_over)
3886 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3887 return 0;
3889 if (lwp->status_pending_p)
3892 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3893 " status.\n", lwpid_of (thread));
3895 return 0;
3898 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3899 or we have adjusted it. */
3900 pc = get_pc (lwp);
3902 /* If the PC has changed since we stopped, then don't do anything,
3903 and let the breakpoint/tracepoint be hit. This happens if, for
3904 instance, GDB handled the decr_pc_after_break subtraction itself,
3905 GDB is OOL stepping this thread, or the user has issued a "jump"
3906 command, or poked thread's registers herself. */
3907 if (pc != lwp->stop_pc)
3910 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3911 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3912 lwpid_of (thread),
3913 paddress (lwp->stop_pc), paddress (pc));
3915 lwp->need_step_over = 0;
3917 return 0;
3919 saved_thread = current_thread;
3920 current_thread = thread;
3922 /* We can only step over breakpoints we know about. */
3923 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3925 /* Don't step over a breakpoint that GDB expects to hit
3926 though. If the condition is being evaluated on the target's side
3927 and it evaluate to false, step over this breakpoint as well. */
3928 if (gdb_breakpoint_here (pc)
3929 && gdb_condition_true_at_breakpoint (pc)
3930 && gdb_no_commands_at_breakpoint (pc))
3933 debug_printf ("Need step over [LWP %ld]? yes, but found"
3934 " GDB breakpoint at 0x%s; skipping step over\n",
3935 lwpid_of (thread), paddress (pc));
3937 current_thread = saved_thread;
3938 return 0;
3940 else
3943 debug_printf ("Need step over [LWP %ld]? yes, "
3944 "found breakpoint at 0x%s\n",
3945 lwpid_of (thread), paddress (pc));
3947 /* We've found an lwp that needs stepping over --- return 1 so
3948 that find_inferior stops looking. */
3949 current_thread = saved_thread;
3951 /* If the step over is cancelled, this is set again. */
3952 lwp->need_step_over = 0;
3953 return 1;
3957 current_thread = saved_thread;
3960 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3961 " at 0x%s\n",
3962 lwpid_of (thread), paddress (pc));
3964 return 0;
3967 /* Start a step-over operation on LWP. When LWP stopped at a
3968 breakpoint, to make progress, we need to remove the breakpoint out
3969 of the way. If we let other threads run while we do that, they may
3970 pass by the breakpoint location and miss hitting it. To avoid
3971 that, a step-over momentarily stops all threads while LWP is
3972 single-stepped while the breakpoint is temporarily uninserted from
3973 the inferior. When the single-step finishes, we reinsert the
3974 breakpoint, and let all threads that are supposed to be running,
3975 run again.
3977 On targets that don't support hardware single-step, we don't
3978 currently support full software single-stepping. Instead, we only
3979 support stepping over the thread event breakpoint, by asking the
3980 low target where to place a reinsert breakpoint. Since this
3981 routine assumes the breakpoint being stepped over is a thread event
3982 breakpoint, it usually assumes the return address of the current
3983 function is a good enough place to set the reinsert breakpoint. */
3986 start_step_over (struct lwp_info *lwp)
3988 struct thread_info *thread = get_lwp_thread (lwp);
3989 struct thread_info *saved_thread;
3990 CORE_ADDR pc;
3991 int step;
3994 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3995 lwpid_of (thread));
3997 stop_all_lwps (1, lwp);
3998 gdb_assert (lwp->suspended == 0);
4000 if (debug_threads)
4001 debug_printf ("Done stopping all threads for step-over.\n");
4003 /* Note, we should always reach here with an already adjusted PC,
4004 either by GDB (if we're resuming due to GDB's request), or by our
4005 caller, if we just finished handling an internal breakpoint GDB
4006 shouldn't care about. */
4007 pc = get_pc (lwp);
4009 saved_thread = current_thread;
4010 current_thread = thread;
4012 lwp->bp_reinsert = pc;
4013 uninsert_breakpoints_at (pc);
4014 uninsert_fast_tracepoint_jumps_at (pc);
4016 if (can_hardware_single_step ())
4017 step = 1;
4018 else
4022 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4023 set_reinsert_breakpoint (raddr);
4024 step = 0;
4027 current_thread = saved_thread;
4029 linux_resume_one_lwp (lwp, step, 0, NULL);
4031 /* Require next event from this LWP. */
4032 step_over_bkpt = thread->entry.id;
4036 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4037 start_step_over, if still there, and delete any reinsert
4038 breakpoints we've set, on non hardware single-step targets. */
4041 finish_step_over (struct lwp_info *lwp)
4043 if (lwp->bp_reinsert != 0)
4045 if (debug_threads)
4046 debug_printf ("Finished step over.\n");
4048 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4049 may be no breakpoint to reinsert there by now. */
4050 reinsert_breakpoints_at (lwp->bp_reinsert);
4051 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4053 lwp->bp_reinsert = 0;
4055 /* Delete any software-single-step reinsert breakpoints. No
4056 longer needed. We don't have to worry about other threads
4057 hitting this trap, and later not being able to explain it,
4058 because we were stepping over a breakpoint, and we hold all
4059 threads but LWP stopped while doing that. */
4060 if (!can_hardware_single_step ())
4061 delete_reinsert_breakpoints ();
4063 step_over_bkpt = null_ptid;
4065 return 1;
4068 return 0;
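/* Editor's summary, not part of the original source: assuming
   hardware single-step, the step-over dance implemented by
   start_step_over/finish_step_over boils down to:

      stop_all_lwps (1, lwp);           -- stop and suspend the others
      uninsert_breakpoints_at (pc);     -- expose the original insn
      ptrace (PTRACE_SINGLESTEP, ...);  -- execute just that insn
      ... wait for the SIGTRAP from this LWP ...
      reinsert_breakpoints_at (pc);     -- put the breakpoint back
      unsuspend_all_lwps (lwp);         -- drop the suspend counts

   step_over_bkpt records which LWP the dance is in progress for, so
   linux_wait_1 can block on exactly that LWP until it is done.  */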
4070 /* This function is called once per thread. We check the thread's resume
4071 request, which will tell us whether to resume, step, or leave the thread
4072 stopped; and what signal, if any, it should be sent.
4074 For threads which we aren't explicitly told otherwise, we preserve
4075 the stepping flag; this is used for stepping over gdbserver-placed
4076 breakpoints.
4078 If pending_flags was set in any thread, we queue any needed
4079 signals, since we won't actually resume. We already have a pending
4080 event to report, so we don't need to preserve any step requests;
4081 they should be re-issued if necessary. */
4084 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4086 struct thread_info *thread = (struct thread_info *) entry;
4087 struct lwp_info *lwp = get_thread_lwp (thread);
4088 int step;
4089 int leave_all_stopped = * (int *) arg;
4090 int leave_pending;
4092 if (lwp->resume == NULL)
4093 return 0;
4095 if (lwp->resume->kind == resume_stop)
4098 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4100 if (!lwp->stopped)
4103 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4105 /* Stop the thread, and wait for the event asynchronously,
4106 through the event loop. */
4107 send_sigstop (lwp);
4109 else
4112 debug_printf ("already stopped LWP %ld\n",
4115 /* The LWP may have been stopped in an internal event that
4116 was not meant to be notified back to GDB (e.g., gdbserver
4117 breakpoint), so we should be reporting a stop event in
4118 this case too. */
4120 /* If the thread already has a pending SIGSTOP, this is a
4121 no-op. Otherwise, something later will presumably resume
4122 the thread and this will cause it to cancel any pending
4123 operation, due to last_resume_kind == resume_stop. If
4124 the thread already has a pending status to report, we
4125 will still report it the next time we wait - see
4126 status_pending_p_callback. */
4128 /* If we already have a pending signal to report, then
4129 there's no need to queue a SIGSTOP, as this means we're
4130 midway through moving the LWP out of the jumppad, and we
4131 will report the pending signal as soon as that is
4132 finished. */
4133 if (lwp->pending_signals_to_report == NULL)
4134 send_sigstop (lwp);
4137 /* For stop requests, we're done. */
4139 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4140 lwp->resume = NULL;
4141 return 0;
4143 /* If this thread which is about to be resumed has a pending status,
4144 then don't resume any threads - we can just report the pending
4145 status. Make sure to queue any signals that would otherwise be
4146 sent. In all-stop mode, we do this decision based on if *any*
4147 thread has a pending status. If there's a thread that needs the
4148 step-over-breakpoint dance, then don't resume any other thread
4149 but that particular one. */
4150 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4152 if (!leave_pending)
4155 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4157 step = (lwp->resume->kind == resume_step);
4158 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4160 else
4163 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4165 /* If we have a new signal, enqueue the signal. */
4166 if (lwp->resume->sig != 0)
4168 struct pending_signals *p_sig;
4169 p_sig = xmalloc (sizeof (*p_sig));
4170 p_sig->prev = lwp->pending_signals;
4171 p_sig->signal = lwp->resume->sig;
4172 memset (&p_sig->info, 0, sizeof (siginfo_t));
4174 /* If this is the same signal we were previously stopped by,
4175 make sure to queue its siginfo. We can ignore the return
4176 value of ptrace; if it fails, we'll skip
4177 PTRACE_SETSIGINFO. */
4178 if (WIFSTOPPED (lwp->last_status)
4179 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4180 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4181 &p_sig->info);
4183 lwp->pending_signals = p_sig;
4187 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4188 lwp->resume = NULL;
4189 return 0;
4193 linux_resume (struct thread_resume *resume_info, size_t n)
4195 struct thread_resume_array array = { resume_info, n };
4196 struct thread_info *need_step_over = NULL;
4197 int any_pending;
4198 int leave_all_stopped;
4203 debug_printf ("linux_resume:\n");
4206 find_inferior (&all_threads, linux_set_resume_request, &array);
4208 /* If there is a thread which would otherwise be resumed, which has
4209 a pending status, then don't resume any threads - we can just
4210 report the pending status. Make sure to queue any signals that
4211 would otherwise be sent. In non-stop mode, we'll apply this
4212 logic to each thread individually. We consume all pending events
4213 before considering to start a step-over (in all-stop). */
4214 any_pending = 0;
4215 if (!non_stop)
4216 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4218 /* If there is a thread which would otherwise be resumed, which is
4219 stopped at a breakpoint that needs stepping over, then don't
4220 resume any threads - have it step over the breakpoint with all
4221 other threads stopped, then resume all threads again. Make sure
4222 to queue any signals that would otherwise be delivered or
4224 if (!any_pending && supports_breakpoints ())
4225 need_step_over
4226 = (struct thread_info *) find_inferior (&all_threads,
4227 need_step_over_p, NULL);
4229 leave_all_stopped = (need_step_over != NULL || any_pending);
4233 if (need_step_over != NULL)
4234 debug_printf ("Not resuming all, need step over\n");
4235 else if (any_pending)
4236 debug_printf ("Not resuming, all-stop and found "
4237 "an LWP with pending status\n");
4239 debug_printf ("Resuming, no pending status or step over needed\n");
4242 /* Even if we're leaving threads stopped, queue all signals we'd
4243 otherwise deliver. */
4244 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4246 if (need_step_over != NULL)
4247 start_step_over (get_thread_lwp (need_step_over));
4251 debug_printf ("linux_resume done\n");
4256 /* This function is called once per thread. We check the thread's
4257 last resume request, which will tell us whether to resume, step, or
4258 leave the thread stopped. Any signal the client requested to be
4259 delivered has already been enqueued at this point.
4261 If any thread that GDB wants running is stopped at an internal
4262 breakpoint that needs stepping over, we start a step-over operation
4263 on that particular thread, and leave all others stopped. */
4266 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4268 struct thread_info *thread = (struct thread_info *) entry;
4269 struct lwp_info *lwp = get_thread_lwp (thread);
4270 int step;
4272 if (lwp == except)
4273 return 0;
4276 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4278 if (!lwp->stopped)
4281 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4282 return 0;
4285 if (thread->last_resume_kind == resume_stop
4286 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4289 debug_printf (" client wants LWP %ld to remain stopped\n",
4290 lwpid_of (thread));
4291 return 0;
4294 if (lwp->status_pending_p)
4297 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4298 lwpid_of (thread));
4299 return 0;
4302 gdb_assert (lwp->suspended >= 0);
4305 if (lwp->suspended)
4307 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4308 return 0;
4311 if (thread->last_resume_kind == resume_stop
4312 && lwp->pending_signals_to_report == NULL
4313 && lwp->collecting_fast_tracepoint == 0)
4315 /* We haven't reported this LWP as stopped yet (otherwise, the
4316 last_status.kind check above would catch it), and we wouldn't
4317 reach here. This LWP may have been momentarily paused by a
4318 stop_all_lwps call while handling for example, another LWP's
4319 step-over. In that case, the pending expected SIGSTOP signal
4320 that was queued at vCont;t handling time will have already
4321 been consumed by wait_for_sigstop, and so we need to requeue
4322 another one here. Note that if the LWP already has a SIGSTOP
4323 pending, this is a no-op. */
4326 debug_printf ("Client wants LWP %ld to stop. "
4327 "Making sure it has a SIGSTOP pending\n",
4333 step = thread->last_resume_kind == resume_step;
4334 linux_resume_one_lwp (lwp, step, 0, NULL);
4336 return 0;
4339 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4341 struct thread_info *thread = (struct thread_info *) entry;
4342 struct lwp_info *lwp = get_thread_lwp (thread);
4344 if (lwp == except)
4345 return 0;
4347 lwp->suspended--;
4348 gdb_assert (lwp->suspended >= 0);
4350 return proceed_one_lwp (entry, except);
4353 /* When we finish a step-over, set threads running again. If there's
4354 another thread that may need a step-over, now's the time to start
4355 it. Eventually, we'll move all threads past their breakpoints. */
4358 proceed_all_lwps (void)
4360 struct thread_info *need_step_over;
4362 /* If there is a thread which would otherwise be resumed, which is
4363 stopped at a breakpoint that needs stepping over, then don't
4364 resume any threads - have it step over the breakpoint with all
4365 other threads stopped, then resume all threads again. */
4367 if (supports_breakpoints ())
4369 need_step_over
4370 = (struct thread_info *) find_inferior (&all_threads,
4371 need_step_over_p, NULL);
4373 if (need_step_over != NULL)
4376 debug_printf ("proceed_all_lwps: found "
4377 "thread %ld needing a step-over\n",
4378 lwpid_of (need_step_over));
4380 start_step_over (get_thread_lwp (need_step_over));
4382 return;
4385 if (debug_threads)
4386 debug_printf ("Proceeding, no step-over needed\n");
4388 find_inferior (&all_threads, proceed_one_lwp, NULL);
4391 /* Stopped LWPs that the client wanted to be running, that don't have
4392 pending statuses, are set to run again, except for EXCEPT, if not
4393 NULL. This undoes a stop_all_lwps call. */
4396 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4402 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4403 lwpid_of (get_lwp_thread (except)));
4405 debug_printf ("unstopping all lwps\n");
4409 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4411 find_inferior (&all_threads, proceed_one_lwp, except);
4415 debug_printf ("unstop_all_lwps done\n");
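/* Illustrative sketch, not compiled into gdbserver: the typical
   pairing of stop_all_lwps with unstop_all_lwps, as used around
   step-over handling.  EVENT_LWP is a hypothetical LWP of
   interest.  */
#if 0
  stop_all_lwps (1 /* suspend */, event_lwp);
  /* ... operate while every other LWP is parked ...  */
  unstop_all_lwps (1 /* unsuspend */, event_lwp);
#endif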
4421 #ifdef HAVE_LINUX_REGSETS
4423 #define use_linux_regsets 1
4425 /* Returns true if REGSET has been disabled. */
4428 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4430 return (info->disabled_regsets != NULL
4431 && info->disabled_regsets[regset - info->regsets]);
4434 /* Disable REGSET. */
4437 disable_regset (struct regsets_info *info, struct regset_info *regset)
4441 dr_offset = regset - info->regsets;
4442 if (info->disabled_regsets == NULL)
4443 info->disabled_regsets = xcalloc (1, info->num_regsets);
4444 info->disabled_regsets[dr_offset] = 1;
4448 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4449 struct regcache *regcache)
4451 struct regset_info *regset;
4452 int saw_general_regs = 0;
4456 pid = lwpid_of (current_thread);
4457 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4462 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4465 buf = xmalloc (regset->size);
4467 nt_type = regset->nt_type;
4471 iov.iov_len = regset->size;
4472 data = (void *) &iov;
4478 res = ptrace (regset->get_request, pid,
4479 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4481 res = ptrace (regset->get_request, pid, data, nt_type);
4487 /* If we get EIO on a regset, do not try it again for
4488 this process mode. */
4489 disable_regset (regsets_info, regset);
4491 else if (errno == ENODATA)
4493 /* ENODATA may be returned if the regset is currently
4494 not "active". This can happen in normal operation,
4495 so suppress the warning in this case. */
4500 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4507 if (regset->type == GENERAL_REGS)
4508 saw_general_regs = 1;
4509 regset->store_function (regcache, buf);
4513 if (saw_general_regs)
4520 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4521 struct regcache *regcache)
4523 struct regset_info *regset;
4524 int saw_general_regs = 0;
4528 pid = lwpid_of (current_thread);
4529 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4534 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4535 || regset->fill_function == NULL)
4538 buf = xmalloc (regset->size);
4540 /* First fill the buffer with the current register set contents,
4541 in case there are any items in the kernel's regset that are
4542 not in gdbserver's regcache. */
4544 nt_type = regset->nt_type;
4548 iov.iov_len = regset->size;
4549 data = (void *) &iov;
4555 res = ptrace (regset->get_request, pid,
4556 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4558 res = ptrace (regset->get_request, pid, data, nt_type);
4563 /* Then overlay our cached registers on that. */
4564 regset->fill_function (regcache, buf);
4566 /* Only now do we write the register set. */
4568 res = ptrace (regset->set_request, pid,
4569 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4571 res = ptrace (regset->set_request, pid, data, nt_type);
4579 /* If we get EIO on a regset, do not try it again for
4580 this process mode. */
4581 disable_regset (regsets_info, regset);
4583 else if (errno == ESRCH)
4585 /* At this point, ESRCH should mean the process is
4586 already gone, in which case we simply ignore attempts
4587 to change its registers. See also the related
4588 comment in linux_resume_one_lwp. */
4594 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4597 else if (regset->type == GENERAL_REGS)
4598 saw_general_regs = 1;
4601 if (saw_general_regs)
4607 #else /* !HAVE_LINUX_REGSETS */
4609 #define use_linux_regsets 0
4610 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4611 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4615 /* Return 1 if register REGNO is supported by one of the regset ptrace
4616 calls or 0 if it has to be transferred individually. */
4619 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4621 unsigned char mask = 1 << (regno % 8);
4622 size_t index = regno / 8;
4624 return (use_linux_regsets
4625 && (regs_info->regset_bitmap == NULL
4626 || (regs_info->regset_bitmap[index] & mask) != 0));
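/* A worked example of the bitmap test above, with a hypothetical
   bitmap (real targets supply their own through struct regs_info).
   One bit per register, eight registers per byte:  */
#if 0
/* A target with 18 registers whose regsets cover registers 0..15:  */
static const unsigned char example_regset_bitmap[] = { 0xff, 0xff, 0x00 };
/* regno 5:  index == 0, mask == 0x20, byte 0 == 0xff -> in a regset.
   regno 16: index == 2, mask == 0x01, byte 2 == 0x00 -> must be
   transferred individually (PTRACE_PEEKUSER/PTRACE_POKEUSER).  */
#endif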
4629 #ifdef HAVE_LINUX_USRREGS
4632 register_addr (const struct usrregs_info *usrregs, int regnum)
4636 if (regnum < 0 || regnum >= usrregs->num_regs)
4637 error ("Invalid register number %d.", regnum);
4639 addr = usrregs->regmap[regnum];
4644 /* Fetch one register. */
4646 fetch_register (const struct usrregs_info *usrregs,
4647 struct regcache *regcache, int regno)
4654 if (regno >= usrregs->num_regs)
4656 if ((*the_low_target.cannot_fetch_register) (regno))
4659 regaddr = register_addr (usrregs, regno);
4663 size = ((register_size (regcache->tdesc, regno)
4664 + sizeof (PTRACE_XFER_TYPE) - 1)
4665 & -sizeof (PTRACE_XFER_TYPE));
4666 buf = alloca (size);
4668 pid = lwpid_of (current_thread);
4669 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4672 *(PTRACE_XFER_TYPE *) (buf + i) =
4673 ptrace (PTRACE_PEEKUSER, pid,
4674 /* Coerce to a uintptr_t first to avoid potential gcc warning
4675 of coercing an 8 byte integer to a 4 byte pointer. */
4676 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4677 regaddr += sizeof (PTRACE_XFER_TYPE);
4679 error ("reading register %d: %s", regno, strerror (errno));
4682 if (the_low_target.supply_ptrace_register)
4683 the_low_target.supply_ptrace_register (regcache, regno, buf);
4685 supply_register (regcache, regno, buf);
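/* The size computation above rounds the register size up to a whole
   number of PTRACE_XFER_TYPE words with the usual power-of-two trick
   (SZ + N - 1) & -N.  A worked example, assuming 8-byte transfer
   words: for a 10-byte register, (10 + 7) & -8 == 17 & ~7 == 16, so
   the register is transferred as two 8-byte ptrace words.  */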
4688 /* Store one register. */
4690 store_register (const struct usrregs_info *usrregs,
4691 struct regcache *regcache, int regno)
4698 if (regno >= usrregs->num_regs)
4700 if ((*the_low_target.cannot_store_register) (regno))
4703 regaddr = register_addr (usrregs, regno);
4707 size = ((register_size (regcache->tdesc, regno)
4708 + sizeof (PTRACE_XFER_TYPE) - 1)
4709 & -sizeof (PTRACE_XFER_TYPE));
4710 buf = alloca (size);
4711 memset (buf, 0, size);
4713 if (the_low_target.collect_ptrace_register)
4714 the_low_target.collect_ptrace_register (regcache, regno, buf);
4716 collect_register (regcache, regno, buf);
4718 pid = lwpid_of (current_thread);
4719 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4722 ptrace (PTRACE_POKEUSER, pid,
4723 /* Coerce to a uintptr_t first to avoid potential gcc warning
4724 about coercing an 8 byte integer to a 4 byte pointer. */
4725 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4726 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4729 /* At this point, ESRCH should mean the process is
4730 already gone, in which case we simply ignore attempts
4731 to change its registers. See also the related
4732 comment in linux_resume_one_lwp. */
4736 if ((*the_low_target.cannot_store_register) (regno) == 0)
4737 error ("writing register %d: %s", regno, strerror (errno));
4739 regaddr += sizeof (PTRACE_XFER_TYPE);
4743 /* Fetch all registers, or just one, from the child process.
4744 If REGNO is -1, do this for all registers, skipping any that are
4745 assumed to have been retrieved by regsets_fetch_inferior_registers,
4746 unless ALL is non-zero.
4747 Otherwise, REGNO specifies which register (so we can save time). */
4749 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4750 struct regcache *regcache, int regno, int all)
4752 struct usrregs_info *usr = regs_info->usrregs;
4756 for (regno = 0; regno < usr->num_regs; regno++)
4757 if (all || !linux_register_in_regsets (regs_info, regno))
4758 fetch_register (usr, regcache, regno);
4761 fetch_register (usr, regcache, regno);
4764 /* Store our register values back into the inferior.
4765 If REGNO is -1, do this for all registers, skipping any that are
4766 assumed to have been saved by regsets_store_inferior_registers,
4767 unless ALL is non-zero.
4768 Otherwise, REGNO specifies which register (so we can save time). */
4770 usr_store_inferior_registers (const struct regs_info *regs_info,
4771 struct regcache *regcache, int regno, int all)
4773 struct usrregs_info *usr = regs_info->usrregs;
4777 for (regno = 0; regno < usr->num_regs; regno++)
4778 if (all || !linux_register_in_regsets (regs_info, regno))
4779 store_register (usr, regcache, regno);
4782 store_register (usr, regcache, regno);
4785 #else /* !HAVE_LINUX_USRREGS */
4787 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4788 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4794 linux_fetch_registers (struct regcache *regcache, int regno)
4798 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4802 if (the_low_target.fetch_register != NULL
4803 && regs_info->usrregs != NULL)
4804 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4805 (*the_low_target.fetch_register) (regcache, regno);
4807 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4808 if (regs_info->usrregs != NULL)
4809 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4813 if (the_low_target.fetch_register != NULL
4814 && (*the_low_target.fetch_register) (regcache, regno))
4817 use_regsets = linux_register_in_regsets (regs_info, regno);
4819 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4821 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4822 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4827 linux_store_registers (struct regcache *regcache, int regno)
4831 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4835 all = regsets_store_inferior_registers (regs_info->regsets_info,
4837 if (regs_info->usrregs != NULL)
4838 usr_store_inferior_registers (regs_info, regcache, regno, all);
4842 use_regsets = linux_register_in_regsets (regs_info, regno);
4844 all = regsets_store_inferior_registers (regs_info->regsets_info,
4846 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4847 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4852 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4853 to debugger memory starting at MYADDR. */
4856 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4858 int pid = lwpid_of (current_thread);
4859 register PTRACE_XFER_TYPE *buffer;
4860 register CORE_ADDR addr;
4867 /* Try using /proc. Don't bother for one word. */
4868 if (len >= 3 * sizeof (long))
4872 /* We could keep this file open and cache it - possibly one per
4873 thread. That requires some juggling, but is even faster. */
4874 sprintf (filename, "/proc/%d/mem", pid);
4875 fd = open (filename, O_RDONLY | O_LARGEFILE);
4879 /* If pread64 is available, use it. It's faster if the kernel
4880 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
4884 bytes = pread64 (fd, myaddr, len, memaddr);
4887 if (lseek (fd, memaddr, SEEK_SET) != -1)
4888 bytes = read (fd, myaddr, len);
	  /* Some data was read; we'll try to get the rest with ptrace.  */
4905 /* Round starting address down to longword boundary. */
4906 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4907 /* Round ending address up; get number of longwords that makes. */
4908 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4909 / sizeof (PTRACE_XFER_TYPE));
4910 /* Allocate buffer of that many longwords. */
4911 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4913 /* Read all the longwords */
4915 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4917 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4918 about coercing an 8 byte integer to a 4 byte pointer. */
4919 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4920 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4921 (PTRACE_TYPE_ARG4) 0);
4927 /* Copy appropriate bytes out of the buffer. */
4930 i *= sizeof (PTRACE_XFER_TYPE);
4931 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  i < len ? i : len);
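/* Illustrative, self-contained condensation of the /proc fast path
   above.  Not compiled; error handling and the ptrace fallback are
   elided, the helper name is hypothetical, and pread64 is assumed to
   be available.  */
#if 0
static int
read_mem_via_proc (int pid, CORE_ADDR memaddr, unsigned char *myaddr,
		   size_t len)
{
  char filename[64];
  int fd;
  ssize_t nread;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return EIO;
  /* One syscall, and 64-bit safe even on 32-bit hosts.  */
  nread = pread64 (fd, myaddr, len, memaddr);
  close (fd);
  return nread == (ssize_t) len ? 0 : EIO;
}
#endif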
4940 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4941 memory at MEMADDR. On failure (cannot write to the inferior)
4942 returns the value of errno. Always succeeds if LEN is zero. */
4945 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4948 /* Round starting address down to longword boundary. */
4949 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4950 /* Round ending address up; get number of longwords that makes. */
4952 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4953 / sizeof (PTRACE_XFER_TYPE);
4955 /* Allocate buffer of that many longwords. */
4956 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4957 alloca (count * sizeof (PTRACE_XFER_TYPE));
4959 int pid = lwpid_of (current_thread);
4963 /* Zero length write always succeeds. */
4969 /* Dump up to four bytes. */
4970 unsigned int val = * (unsigned int *) myaddr;
      if (len == 1)
	val = val & 0xff;
      else if (len == 2)
	val = val & 0xffff;
      else if (len == 3)
	val = val & 0xffffff;
4977 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4978 val, (long)memaddr);
4981 /* Fill start and end extra bytes of buffer with existing memory data. */
4984 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4985 about coercing an 8 byte integer to a 4 byte pointer. */
4986 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4987 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4988 (PTRACE_TYPE_ARG4) 0);
4996 = ptrace (PTRACE_PEEKTEXT, pid,
4997 /* Coerce to a uintptr_t first to avoid potential gcc warning
4998 about coercing an 8 byte integer to a 4 byte pointer. */
4999 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5000 * sizeof (PTRACE_XFER_TYPE)),
5001 (PTRACE_TYPE_ARG4) 0);
5006 /* Copy data to be written over corresponding part of buffer. */
5008 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5011 /* Write the entire buffer. */
5013 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5016 ptrace (PTRACE_POKETEXT, pid,
5017 /* Coerce to a uintptr_t first to avoid potential gcc warning
5018 about coercing an 8 byte integer to a 4 byte pointer. */
5019 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5020 (PTRACE_TYPE_ARG4) buffer[i]);
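/* A worked example of the alignment bookkeeping above: writing
   LEN == 5 bytes at MEMADDR == 0x1005 with 8-byte PTRACE_XFER_TYPE
   words gives

     addr  == 0x1005 & ~7 == 0x1000
     count == ((0x100a - 0x1000) + 7) / 8 == 2

   so the words at 0x1000 and 0x1008 are peeked first, the five
   payload bytes are overlaid at buffer offset 5, and both words are
   poked back.  */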
5029 linux_look_up_symbols (void)
5031 #ifdef USE_THREAD_DB
5032 struct process_info *proc = current_process ();
5034 if (proc->priv->thread_db != NULL)
5037 /* If the kernel supports tracing clones, then we don't need to
     use the magic thread event breakpoint to learn about
     threads.  */
5040 thread_db_init (!linux_supports_traceclone ());
5045 linux_request_interrupt (void)
5047 extern unsigned long signal_pid;
5049 /* Send a SIGINT to the process group. This acts just like the user
5050 typed a ^C on the controlling terminal. */
5051 kill (-signal_pid, SIGINT);
5054 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5055 to debugger memory starting at MYADDR. */
5058 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5060 char filename[PATH_MAX];
5062 int pid = lwpid_of (current_thread);
5064 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5066 fd = open (filename, O_RDONLY);
5070 if (offset != (CORE_ADDR) 0
5071 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5074 n = read (fd, myaddr, len);
5081 /* These breakpoint and watchpoint related wrapper functions simply
5082 pass on the function call if the target has registered a
5083 corresponding function. */
5086 linux_supports_z_point_type (char z_type)
5088 return (the_low_target.supports_z_point_type != NULL
5089 && the_low_target.supports_z_point_type (z_type));
5093 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5094 int size, struct raw_breakpoint *bp)
5096 if (the_low_target.insert_point != NULL)
5097 return the_low_target.insert_point (type, addr, size, bp);
5099 /* Unsupported (see target.h). */
5104 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5105 int size, struct raw_breakpoint *bp)
5107 if (the_low_target.remove_point != NULL)
5108 return the_low_target.remove_point (type, addr, size, bp);
5110 /* Unsupported (see target.h). */
/* Implement the to_stopped_by_sw_breakpoint target_ops
   method.  */
5118 linux_stopped_by_sw_breakpoint (void)
5120 struct lwp_info *lwp = get_thread_lwp (current_thread);
5122 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
/* Implement the to_supports_stopped_by_sw_breakpoint target_ops
   method.  */
5129 linux_supports_stopped_by_sw_breakpoint (void)
5131 return USE_SIGTRAP_SIGINFO;
/* Implement the to_stopped_by_hw_breakpoint target_ops
   method.  */
5138 linux_stopped_by_hw_breakpoint (void)
5140 struct lwp_info *lwp = get_thread_lwp (current_thread);
5142 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
/* Implement the to_supports_stopped_by_hw_breakpoint target_ops
   method.  */
5149 linux_supports_stopped_by_hw_breakpoint (void)
5151 return USE_SIGTRAP_SIGINFO;
5155 linux_stopped_by_watchpoint (void)
5157 struct lwp_info *lwp = get_thread_lwp (current_thread);
5159 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5163 linux_stopped_data_address (void)
5165 struct lwp_info *lwp = get_thread_lwp (current_thread);
5167 return lwp->stopped_data_address;
5170 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5171 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5172 && defined(PT_TEXT_END_ADDR)
5174 /* This is only used for targets that define PT_TEXT_ADDR,
5175 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
   the target has different ways of acquiring this information, like
   loadmaps.  */
5179 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5180 to tell gdb about. */
5183 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5185 unsigned long text, text_end, data;
5186 int pid = lwpid_of (get_thread_lwp (current_thread));
5190 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5191 (PTRACE_TYPE_ARG4) 0);
5192 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5193 (PTRACE_TYPE_ARG4) 0);
5194 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5195 (PTRACE_TYPE_ARG4) 0);
5199 /* Both text and data offsets produced at compile-time (and so
5200 used by gdb) are relative to the beginning of the program,
5201 with the data segment immediately following the text segment.
5202 However, the actual runtime layout in memory may put the data
5203 somewhere else, so when we send gdb a data base-address, we
5204 use the real data base address and subtract the compile-time
5205 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
5209 *data_p = data - (text_end - text);
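/* A worked example with made-up numbers: if the kernel reports
   text == 0x10000, text_end == 0x18000 and data == 0x38000, the text
   segment is 0x8000 bytes long, so we report

     *data_p == 0x38000 - 0x8000 == 0x30000

   which is the base GDB must add to compile-time data-segment
   offsets.  */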
5218 linux_qxfer_osdata (const char *annex,
5219 unsigned char *readbuf, unsigned const char *writebuf,
5220 CORE_ADDR offset, int len)
5222 return linux_common_xfer_osdata (annex, readbuf, offset, len);
/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  */
5229 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5233 if (the_low_target.siginfo_fixup != NULL)
5234 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5236 /* If there was no callback, or the callback didn't do anything,
5237 then just do a straight memcpy. */
5241 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5243 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5248 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5249 unsigned const char *writebuf, CORE_ADDR offset, int len)
5253 char inf_siginfo[sizeof (siginfo_t)];
5255 if (current_thread == NULL)
5258 pid = lwpid_of (current_thread);
5261 debug_printf ("%s siginfo for lwp %d.\n",
5262 readbuf != NULL ? "Reading" : "Writing",
5265 if (offset >= sizeof (siginfo))
5268 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5271 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5272 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5273 inferior with a 64-bit GDBSERVER should look the same as debugging it
5274 with a 32-bit GDBSERVER, we need to convert it. */
5275 siginfo_fixup (&siginfo, inf_siginfo, 0);
5277 if (offset + len > sizeof (siginfo))
5278 len = sizeof (siginfo) - offset;
5280 if (readbuf != NULL)
5281 memcpy (readbuf, inf_siginfo + offset, len);
5284 memcpy (inf_siginfo + offset, writebuf, len);
5286 /* Convert back to ptrace layout before flushing it out. */
5287 siginfo_fixup (&siginfo, inf_siginfo, 1);
5289 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
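/* A worked example of the clamping above, assuming the common Linux
   sizeof (siginfo_t) == 128: a request with OFFSET == 120 and
   LEN == 16 is clamped to LEN == 8, so the transfer never runs past
   the end of the converted siginfo buffer.  */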
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
   it notifies us when children change state; and it acts as the
   handler for the sigsuspend in my_waitpid.  */
5301 sigchld_handler (int signo)
5303 int old_errno = errno;
      /* fprintf is not async-signal-safe, so call write
	 directly.  */
5311 if (write (2, "sigchld_handler\n",
5312 sizeof ("sigchld_handler\n") - 1) < 0)
5313 break; /* just ignore */
5317 if (target_is_async_p ())
5318 async_file_mark (); /* trigger a linux_wait */
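/* Illustrative sketch, not compiled into gdbserver, of the self-pipe
   pattern behind async_file_mark: a SIGCHLD handler may only use
   async-signal-safe calls, so it wakes the event loop by writing one
   byte to a non-blocking pipe that the loop polls.  The pipe end here
   is hypothetical; the real one is linux_event_pipe.  */
#if 0
static void
mark_event_from_handler (int pipe_write_end)
{
  /* write is async-signal-safe.  If the pipe is full, a wakeup is
     already pending, so the failure can be ignored.  */
  if (write (pipe_write_end, "+", 1) < 0)
    ; /* Just ignore.  */
}
#endif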
5324 linux_supports_non_stop (void)
5330 linux_async (int enable)
5332 int previous = target_is_async_p ();
5335 debug_printf ("linux_async (%d), previous=%d\n",
5338 if (previous != enable)
5341 sigemptyset (&mask);
5342 sigaddset (&mask, SIGCHLD);
5344 sigprocmask (SIG_BLOCK, &mask, NULL);
5348 if (pipe (linux_event_pipe) == -1)
5350 linux_event_pipe[0] = -1;
5351 linux_event_pipe[1] = -1;
5352 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5354 warning ("creating event pipe failed.");
5358 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5359 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5361 /* Register the event loop handler. */
5362 add_file_handler (linux_event_pipe[0],
5363 handle_target_event, NULL);
5365 /* Always trigger a linux_wait. */
5370 delete_file_handler (linux_event_pipe[0]);
5372 close (linux_event_pipe[0]);
5373 close (linux_event_pipe[1]);
5374 linux_event_pipe[0] = -1;
5375 linux_event_pipe[1] = -1;
5378 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5385 linux_start_non_stop (int nonstop)
5387 /* Register or unregister from event-loop accordingly. */
5388 linux_async (nonstop);
5390 if (target_is_async_p () != (nonstop != 0))
5397 linux_supports_multi_process (void)
5403 linux_supports_disable_randomization (void)
5405 #ifdef HAVE_PERSONALITY
5413 linux_supports_agent (void)
5419 linux_supports_range_stepping (void)
5421 if (*the_low_target.supports_range_stepping == NULL)
5424 return (*the_low_target.supports_range_stepping) ();
5427 /* Enumerate spufs IDs for process PID. */
5429 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5435 struct dirent *entry;
5437 sprintf (path, "/proc/%ld/fd", pid);
5438 dir = opendir (path);
5443 while ((entry = readdir (dir)) != NULL)
5449 fd = atoi (entry->d_name);
5453 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5454 if (stat (path, &st) != 0)
5456 if (!S_ISDIR (st.st_mode))
5459 if (statfs (path, &stfs) != 0)
5461 if (stfs.f_type != SPUFS_MAGIC)
5464 if (pos >= offset && pos + 4 <= offset + len)
5466 *(unsigned int *)(buf + pos - offset) = fd;
5476 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5477 object type, using the /proc file system. */
5479 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5480 unsigned const char *writebuf,
5481 CORE_ADDR offset, int len)
5483 long pid = lwpid_of (current_thread);
5488 if (!writebuf && !readbuf)
5496 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5499 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5500 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5505 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5512 ret = write (fd, writebuf, (size_t) len);
5514 ret = read (fd, readbuf, (size_t) len);
5520 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};
5531 # if defined PT_GETDSBT
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
5544 # define LINUX_LOADMAP PT_GETDSBT
5545 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5546 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Half version;
  /* Number of segments in this map.  */
  Elf32_Half nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
5557 # define LINUX_LOADMAP PTRACE_GETFDPIC
5558 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5559 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5563 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5564 unsigned char *myaddr, unsigned int len)
5566 int pid = lwpid_of (current_thread);
5568 struct target_loadmap *data = NULL;
5569 unsigned int actual_length, copy_length;
5571 if (strcmp (annex, "exec") == 0)
5572 addr = (int) LINUX_LOADMAP_EXEC;
5573 else if (strcmp (annex, "interp") == 0)
5574 addr = (int) LINUX_LOADMAP_INTERP;
5578 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5584 actual_length = sizeof (struct target_loadmap)
5585 + sizeof (struct target_loadseg) * data->nsegs;
5587 if (offset < 0 || offset > actual_length)
5590 copy_length = actual_length - offset < len ? actual_length - offset : len;
5591 memcpy (myaddr, (char *) data + offset, copy_length);
5595 # define linux_read_loadmap NULL
5596 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5599 linux_process_qsupported (const char *query)
5601 if (the_low_target.process_qsupported != NULL)
5602 the_low_target.process_qsupported (query);
5606 linux_supports_tracepoints (void)
5608 if (*the_low_target.supports_tracepoints == NULL)
5611 return (*the_low_target.supports_tracepoints) ();
5615 linux_read_pc (struct regcache *regcache)
5617 if (the_low_target.get_pc == NULL)
5620 return (*the_low_target.get_pc) (regcache);
5624 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5626 gdb_assert (the_low_target.set_pc != NULL);
5628 (*the_low_target.set_pc) (regcache, pc);
5632 linux_thread_stopped (struct thread_info *thread)
5634 return get_thread_lwp (thread)->stopped;
5637 /* This exposes stop-all-threads functionality to other modules. */
5640 linux_pause_all (int freeze)
5642 stop_all_lwps (freeze, NULL);
5645 /* This exposes unstop-all-threads functionality to other gdbserver
5649 linux_unpause_all (int unfreeze)
5651 unstop_all_lwps (unfreeze, NULL);
5655 linux_prepare_to_access_memory (void)
5657 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5660 linux_pause_all (1);
5665 linux_done_accessing_memory (void)
5667 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5670 linux_unpause_all (1);
5674 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5675 CORE_ADDR collector,
5678 CORE_ADDR *jump_entry,
5679 CORE_ADDR *trampoline,
5680 ULONGEST *trampoline_size,
5681 unsigned char *jjump_pad_insn,
5682 ULONGEST *jjump_pad_insn_size,
5683 CORE_ADDR *adjusted_insn_addr,
5684 CORE_ADDR *adjusted_insn_addr_end,
5687 return (*the_low_target.install_fast_tracepoint_jump_pad)
5688 (tpoint, tpaddr, collector, lockaddr, orig_size,
5689 jump_entry, trampoline, trampoline_size,
5690 jjump_pad_insn, jjump_pad_insn_size,
5691 adjusted_insn_addr, adjusted_insn_addr_end,
5695 static struct emit_ops *
5696 linux_emit_ops (void)
5698 if (the_low_target.emit_ops != NULL)
5699 return (*the_low_target.emit_ops) ();
5705 linux_get_min_fast_tracepoint_insn_len (void)
5707 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5710 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5713 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5714 CORE_ADDR *phdr_memaddr, int *num_phdr)
5716 char filename[PATH_MAX];
5718 const int auxv_size = is_elf64
5719 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5720 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5722 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5724 fd = open (filename, O_RDONLY);
5730 while (read (fd, buf, auxv_size) == auxv_size
5731 && (*phdr_memaddr == 0 || *num_phdr == 0))
5735 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5737 switch (aux->a_type)
5740 *phdr_memaddr = aux->a_un.a_val;
5743 *num_phdr = aux->a_un.a_val;
5749 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5751 switch (aux->a_type)
5754 *phdr_memaddr = aux->a_un.a_val;
5757 *num_phdr = aux->a_un.a_val;
5765 if (*phdr_memaddr == 0 || *num_phdr == 0)
5767 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5768 "phdr_memaddr = %ld, phdr_num = %d",
5769 (long) *phdr_memaddr, *num_phdr);
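/* For reference, /proc/PID/auxv is a flat array of (a_type, a_val)
   pairs terminated by an AT_NULL entry.  A 64-bit inferior might
   contain, with made-up values:

     { AT_PHDR  (3), 0x400040 }
     { AT_PHNUM (5), 9        }
     ...
     { AT_NULL  (0), 0        }

   The loop above stops as soon as both AT_PHDR and AT_PHNUM have
   been seen.  */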
5776 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5779 get_dynamic (const int pid, const int is_elf64)
5781 CORE_ADDR phdr_memaddr, relocation;
5783 unsigned char *phdr_buf;
5784 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5786 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5789 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5790 phdr_buf = alloca (num_phdr * phdr_size);
5792 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5795 /* Compute relocation: it is expected to be 0 for "regular" executables,
5796 non-zero for PIE ones. */
5798 for (i = 0; relocation == -1 && i < num_phdr; i++)
5801 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5803 if (p->p_type == PT_PHDR)
5804 relocation = phdr_memaddr - p->p_vaddr;
5808 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5810 if (p->p_type == PT_PHDR)
5811 relocation = phdr_memaddr - p->p_vaddr;
5814 if (relocation == -1)
      /* PT_PHDR is optional, but necessary for PIE in general.
	 Fortunately, all real-world executables, including PIE
	 executables, always have PT_PHDR present.  PT_PHDR is not
	 present in some shared libraries or in fpc (Free Pascal 2.4)
	 binaries, but neither of those needs or provides DT_DEBUG
	 anyway (fpc binaries are statically linked).

	 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.

	 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
5829 for (i = 0; i < num_phdr; i++)
5833 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5835 if (p->p_type == PT_DYNAMIC)
5836 return p->p_vaddr + relocation;
5840 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5842 if (p->p_type == PT_DYNAMIC)
5843 return p->p_vaddr + relocation;
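/* A worked example of the relocation logic above, with made-up
   addresses: a PIE executable whose PT_PHDR records p_vaddr == 0x40,
   but whose program headers actually sit at
   phdr_memaddr == 0x7f0000000040, has

     relocation == 0x7f0000000040 - 0x40 == 0x7f0000000000

   so a PT_DYNAMIC with p_vaddr == 0x2d90 yields
   &_DYNAMIC == 0x7f0000002d90.  For a non-PIE executable the two
   addresses coincide and relocation is 0.  */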
5850 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5851 can be 0 if the inferior does not yet have the library list initialized.
5852 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5853 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5856 get_r_debug (const int pid, const int is_elf64)
5858 CORE_ADDR dynamic_memaddr;
5859 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5860 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5863 dynamic_memaddr = get_dynamic (pid, is_elf64);
5864 if (dynamic_memaddr == 0)
5867 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5871 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5872 #ifdef DT_MIPS_RLD_MAP
5876 unsigned char buf[sizeof (Elf64_Xword)];
5880 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5882 if (linux_read_memory (dyn->d_un.d_val,
5883 rld_map.buf, sizeof (rld_map.buf)) == 0)
5888 #endif /* DT_MIPS_RLD_MAP */
5890 if (dyn->d_tag == DT_DEBUG && map == -1)
5891 map = dyn->d_un.d_val;
5893 if (dyn->d_tag == DT_NULL)
5898 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5899 #ifdef DT_MIPS_RLD_MAP
5903 unsigned char buf[sizeof (Elf32_Word)];
5907 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5909 if (linux_read_memory (dyn->d_un.d_val,
5910 rld_map.buf, sizeof (rld_map.buf)) == 0)
5915 #endif /* DT_MIPS_RLD_MAP */
5917 if (dyn->d_tag == DT_DEBUG && map == -1)
5918 map = dyn->d_un.d_val;
5920 if (dyn->d_tag == DT_NULL)
5924 dynamic_memaddr += dyn_size;
5930 /* Read one pointer from MEMADDR in the inferior. */
5933 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5937 /* Go through a union so this works on either big or little endian
5938 hosts, when the inferior's pointer size is smaller than the size
   of CORE_ADDR.  It is assumed the inferior's endianness is the
   same as the superior's.  */
5943 CORE_ADDR core_addr;
5948 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5951 if (ptr_size == sizeof (CORE_ADDR))
5952 *ptr = addr.core_addr;
5953 else if (ptr_size == sizeof (unsigned int))
5956 gdb_assert_not_reached ("unhandled pointer size");
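/* Why the union matters, as a worked example: reading the 4-byte
   inferior pointer 0x12345678 on a big-endian 64-bit host deposits
   the bytes 12 34 56 78 at the start of the union.  Read back
   through the 4-byte member they are 0x12345678, as intended;
   interpreted as the leading bytes of an 8-byte CORE_ADDR they would
   instead form the high-order half of the value.  */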
struct link_map_offsets
  {
    /* Offset and size of r_debug.r_version.  */
    int r_version_offset;
    /* Offset and size of r_debug.r_map.  */
    int r_map_offset;
    /* Offset to l_addr field in struct link_map.  */
    int l_addr_offset;
    /* Offset to l_name field in struct link_map.  */
    int l_name_offset;
    /* Offset to l_ld field in struct link_map.  */
    int l_ld_offset;
    /* Offset to l_next field in struct link_map.  */
    int l_next_offset;
    /* Offset to l_prev field in struct link_map.  */
    int l_prev_offset;
  };
5985 /* Construct qXfer:libraries-svr4:read reply. */
5988 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5989 unsigned const char *writebuf,
5990 CORE_ADDR offset, int len)
5993 unsigned document_len;
5994 struct process_info_private *const priv = current_process ()->priv;
5995 char filename[PATH_MAX];
5998 static const struct link_map_offsets lmo_32bit_offsets =
6000 0, /* r_version offset. */
6001 4, /* r_debug.r_map offset. */
6002 0, /* l_addr offset in link_map. */
6003 4, /* l_name offset in link_map. */
6004 8, /* l_ld offset in link_map. */
6005 12, /* l_next offset in link_map. */
6006 16 /* l_prev offset in link_map. */
6009 static const struct link_map_offsets lmo_64bit_offsets =
6011 0, /* r_version offset. */
6012 8, /* r_debug.r_map offset. */
6013 0, /* l_addr offset in link_map. */
6014 8, /* l_name offset in link_map. */
6015 16, /* l_ld offset in link_map. */
6016 24, /* l_next offset in link_map. */
6017 32 /* l_prev offset in link_map. */
6019 const struct link_map_offsets *lmo;
6020 unsigned int machine;
6022 CORE_ADDR lm_addr = 0, lm_prev = 0;
6023 int allocated = 1024;
6025 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6026 int header_done = 0;
6028 if (writebuf != NULL)
6030 if (readbuf == NULL)
6033 pid = lwpid_of (current_thread);
6034 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6035 is_elf64 = elf_64_file_p (filename, &machine);
6036 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6037 ptr_size = is_elf64 ? 8 : 4;
6039 while (annex[0] != '\0')
6045 sep = strchr (annex, '=');
6050 if (len == 5 && startswith (annex, "start"))
6052 else if (len == 4 && startswith (annex, "prev"))
6056 annex = strchr (sep, ';');
6063 annex = decode_address_to_semicolon (addrp, sep + 1);
6070 if (priv->r_debug == 0)
6071 priv->r_debug = get_r_debug (pid, is_elf64);
  /* We failed to find DT_DEBUG.  This situation will not change
     for this inferior - do not retry it.  Report it to GDB as E01;
     see GDB's solib-svr4.c for the reasons.  */
6076 if (priv->r_debug == (CORE_ADDR) -1)
6079 if (priv->r_debug != 0)
6081 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6082 (unsigned char *) &r_version,
6083 sizeof (r_version)) != 0
6086 warning ("unexpected r_debug version %d", r_version);
6088 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6089 &lm_addr, ptr_size) != 0)
6091 warning ("unable to read r_map from 0x%lx",
6092 (long) priv->r_debug + lmo->r_map_offset);
6097 document = xmalloc (allocated);
6098 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6099 p = document + strlen (document);
6102 && read_one_ptr (lm_addr + lmo->l_name_offset,
6103 &l_name, ptr_size) == 0
6104 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6105 &l_addr, ptr_size) == 0
6106 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6107 &l_ld, ptr_size) == 0
6108 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6109 &l_prev, ptr_size) == 0
6110 && read_one_ptr (lm_addr + lmo->l_next_offset,
6111 &l_next, ptr_size) == 0)
6113 unsigned char libname[PATH_MAX];
6115 if (lm_prev != l_prev)
6117 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6118 (long) lm_prev, (long) l_prev);
	  /* Ignore the first entry even if it has a valid name, as it
	     corresponds to the main executable.  The first entry should
	     not be skipped if the dynamic loader was loaded late by a
	     static executable (see the solib-svr4.c parameter
	     ignore_first).  But in that case the main executable does
	     not have PT_DYNAMIC present, and this function has already
	     exited above due to a failed get_r_debug.  */
6130 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6135 /* Not checking for error because reading may stop before
6136 we've got PATH_MAX worth of characters. */
6138 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6139 libname[sizeof (libname) - 1] = '\0';
6140 if (libname[0] != '\0')
6142 /* 6x the size for xml_escape_text below. */
6143 size_t len = 6 * strlen ((char *) libname);
6148 /* Terminate `<library-list-svr4'. */
6153 while (allocated < p - document + len + 200)
6155 /* Expand to guarantee sufficient storage. */
6156 uintptr_t document_len = p - document;
6158 document = xrealloc (document, 2 * allocated);
6160 p = document + document_len;
6163 name = xml_escape_text ((char *) libname);
6164 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6165 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6166 name, (unsigned long) lm_addr,
6167 (unsigned long) l_addr, (unsigned long) l_ld);
6178 /* Empty list; terminate `<library-list-svr4'. */
6182 strcpy (p, "</library-list-svr4>");
6184 document_len = strlen (document);
6185 if (offset < document_len)
6186 document_len -= offset;
6189 if (len > document_len)
6192 memcpy (readbuf, document + offset, len);
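/* For reference, a reply document built above looks roughly like
   this (addresses are made up):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe150">
   <library name="/lib/libc.so.6" lm="0x7ffff7fc4000"
	    l_addr="0x7ffff7a0e000" l_ld="0x7ffff7dd1000"/>
   </library-list-svr4>  */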
6198 #ifdef HAVE_LINUX_BTRACE
6200 /* See to_enable_btrace target method. */
6202 static struct btrace_target_info *
6203 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6205 struct btrace_target_info *tinfo;
6207 tinfo = linux_enable_btrace (ptid, conf);
6209 if (tinfo != NULL && tinfo->ptr_bits == 0)
6211 struct thread_info *thread = find_thread_ptid (ptid);
6212 struct regcache *regcache = get_thread_regcache (thread, 0);
6214 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6220 /* See to_disable_btrace target method. */
6223 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6225 enum btrace_error err;
6227 err = linux_disable_btrace (tinfo);
6228 return (err == BTRACE_ERR_NONE ? 0 : -1);
6231 /* See to_read_btrace target method. */
6234 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6237 struct btrace_data btrace;
6238 struct btrace_block *block;
6239 enum btrace_error err;
6242 btrace_data_init (&btrace);
6244 err = linux_read_btrace (&btrace, tinfo, type);
6245 if (err != BTRACE_ERR_NONE)
6247 if (err == BTRACE_ERR_OVERFLOW)
6248 buffer_grow_str0 (buffer, "E.Overflow.");
6250 buffer_grow_str0 (buffer, "E.Generic Error.");
6252 btrace_data_fini (&btrace);
6256 switch (btrace.format)
6258 case BTRACE_FORMAT_NONE:
6259 buffer_grow_str0 (buffer, "E.No Trace.");
6262 case BTRACE_FORMAT_BTS:
6263 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6264 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6267 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6269 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6270 paddress (block->begin), paddress (block->end));
6272 buffer_grow_str0 (buffer, "</btrace>\n");
6276 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6278 btrace_data_fini (&btrace);
6282 btrace_data_fini (&btrace);
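/* For reference, a successful BTS reply built above looks roughly
   like this (addresses are made up):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400500" end="0x400520"/>
   </btrace>  */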
6286 /* See to_btrace_conf target method. */
6289 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6290 struct buffer *buffer)
6292 const struct btrace_config *conf;
6294 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6295 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6297 conf = linux_btrace_conf (tinfo);
6300 switch (conf->format)
6302 case BTRACE_FORMAT_NONE:
6305 case BTRACE_FORMAT_BTS:
6306 buffer_xml_printf (buffer, "<bts");
6307 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6308 buffer_xml_printf (buffer, " />\n");
6313 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6316 #endif /* HAVE_LINUX_BTRACE */
6318 /* See nat/linux-nat.h. */
6321 current_lwp_ptid (void)
6323 return ptid_of (current_thread);
6326 static struct target_ops linux_target_ops = {
6327 linux_create_inferior,
6336 linux_fetch_registers,
6337 linux_store_registers,
6338 linux_prepare_to_access_memory,
6339 linux_done_accessing_memory,
6342 linux_look_up_symbols,
6343 linux_request_interrupt,
6345 linux_supports_z_point_type,
6348 linux_stopped_by_sw_breakpoint,
6349 linux_supports_stopped_by_sw_breakpoint,
6350 linux_stopped_by_hw_breakpoint,
6351 linux_supports_stopped_by_hw_breakpoint,
6352 linux_stopped_by_watchpoint,
6353 linux_stopped_data_address,
6354 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6355 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6356 && defined(PT_TEXT_END_ADDR)
6361 #ifdef USE_THREAD_DB
6362 thread_db_get_tls_address,
6367 hostio_last_error_from_errno,
6370 linux_supports_non_stop,
6372 linux_start_non_stop,
6373 linux_supports_multi_process,
6374 #ifdef USE_THREAD_DB
6375 thread_db_handle_monitor_command,
6379 linux_common_core_of_thread,
6381 linux_process_qsupported,
6382 linux_supports_tracepoints,
6385 linux_thread_stopped,
6389 linux_stabilize_threads,
6390 linux_install_fast_tracepoint_jump_pad,
6392 linux_supports_disable_randomization,
6393 linux_get_min_fast_tracepoint_insn_len,
6394 linux_qxfer_libraries_svr4,
6395 linux_supports_agent,
6396 #ifdef HAVE_LINUX_BTRACE
6397 linux_supports_btrace,
6398 linux_low_enable_btrace,
6399 linux_low_disable_btrace,
6400 linux_low_read_btrace,
6401 linux_low_btrace_conf,
6409 linux_supports_range_stepping,
6413 linux_init_signals ()
6415 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6416 to find what the cancel signal actually is. */
6417 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6418 signal (__SIGRTMIN+1, SIG_IGN);
6422 #ifdef HAVE_LINUX_REGSETS
6424 initialize_regsets_info (struct regsets_info *info)
6426 for (info->num_regsets = 0;
6427 info->regsets[info->num_regsets].size >= 0;
6428 info->num_regsets++)
6434 initialize_low (void)
6436 struct sigaction sigchld_action;
6437 memset (&sigchld_action, 0, sizeof (sigchld_action));
6438 set_target_ops (&linux_target_ops);
6439 set_breakpoint_data (the_low_target.breakpoint,
6440 the_low_target.breakpoint_len);
6441 linux_init_signals ();
6442 linux_ptrace_init_warnings ();
6444 sigchld_action.sa_handler = sigchld_handler;
6445 sigemptyset (&sigchld_action.sa_mask);
6446 sigchld_action.sa_flags = SA_RESTART;
6447 sigaction (SIGCHLD, &sigchld_action, NULL);
6449 initialize_low_arch ();