1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2014 Free Software Foundation, Inc.
4 This file is part of GDB.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
27 #include <sys/ptrace.h>
28 #include "nat/linux-ptrace.h"
29 #include "nat/linux-procfs.h"
31 #include <sys/ioctl.h>
34 #include <sys/syscall.h>
38 #include <sys/types.h>
43 #include "filestuff.h"
44 #include "tracepoint.h"
47 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
55 #define SPUFS_MAGIC 0x23c9b64e
58 #ifdef HAVE_PERSONALITY
59 # include <sys/personality.h>
60 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
61 # define ADDR_NO_RANDOMIZE 0x0040000
70 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
73 /* This is the kernel's hard limit. Not to be confused with
79 /* Some targets did not define these ptrace constants from the start,
80 so gdbserver defines them locally here. In the future, these may
81 be removed after they are added to asm/ptrace.h. */
82 #if !(defined(PT_TEXT_ADDR) \
83 || defined(PT_DATA_ADDR) \
84 || defined(PT_TEXT_END_ADDR))
85 #if defined(__mcoldfire__)
86 /* These are still undefined in 3.10 kernels. */
87 #define PT_TEXT_ADDR 49*4
88 #define PT_DATA_ADDR 50*4
89 #define PT_TEXT_END_ADDR 51*4
90 /* BFIN already defines these since at least 2.6.32 kernels. */
92 #define PT_TEXT_ADDR 220
93 #define PT_TEXT_END_ADDR 224
94 #define PT_DATA_ADDR 228
95 /* These are still undefined in 3.10 kernels. */
96 #elif defined(__TMS320C6X__)
97 #define PT_TEXT_ADDR (0x10000*4)
98 #define PT_DATA_ADDR (0x10004*4)
99 #define PT_TEXT_END_ADDR (0x10008*4)
103 #ifdef HAVE_LINUX_BTRACE
104 # include "nat/linux-btrace.h"
107 #ifndef HAVE_ELF32_AUXV_T
108 /* Copied from glibc's elf.h. */
111 uint32_t a_type; /* Entry type */
114 uint32_t a_val; /* Integer value */
115 /* We use to have pointer elements added here. We cannot do that,
116 though, since it does not work when using 32-bit definitions
117 on 64-bit platforms and vice versa. */
122 #ifndef HAVE_ELF64_AUXV_T
123 /* Copied from glibc's elf.h. */
126 uint64_t a_type; /* Entry type */
129 uint64_t a_val; /* Integer value */
130 /* We use to have pointer elements added here. We cannot do that,
131 though, since it does not work when using 32-bit definitions
132 on 64-bit platforms and vice versa. */
137 /* A list of all unknown processes which receive stop signals. Some
138 other process will presumably claim each of these as forked
139 children momentarily. */
141 struct simple_pid_list
143 /* The process ID. */
146 /* The status as reported by waitpid. */
150 struct simple_pid_list *next;
152 struct simple_pid_list *stopped_pids;
154 /* Trivial list manipulation functions to keep track of a list of new
155 stopped processes. */
158 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
160 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
163 new_pid->status = status;
164 new_pid->next = *listp;
169 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
171 struct simple_pid_list **p;
173 for (p = listp; *p != NULL; p = &(*p)->next)
174 if ((*p)->pid == pid)
176 struct simple_pid_list *next = (*p)->next;
178 *statusp = (*p)->status;
186 enum stopping_threads_kind
188 /* Not stopping threads presently. */
189 NOT_STOPPING_THREADS,
191 /* Stopping threads. */
194 /* Stopping and suspending threads. */
195 STOPPING_AND_SUSPENDING_THREADS
198 /* This is set while stop_all_lwps is in effect. */
199 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
201 /* FIXME make into a target method? */
202 int using_threads = 1;
204 /* True if we're presently stabilizing threads (moving them out of
206 static int stabilizing_threads;
208 static void linux_resume_one_lwp (struct lwp_info *lwp,
209 int step, int signal, siginfo_t *info);
210 static void linux_resume (struct thread_resume *resume_info, size_t n);
211 static void stop_all_lwps (int suspend, struct lwp_info *except);
212 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
213 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
214 int *wstat, int options);
215 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
216 static struct lwp_info *add_lwp (ptid_t ptid);
217 static int linux_stopped_by_watchpoint (void);
218 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
219 static void proceed_all_lwps (void);
220 static int finish_step_over (struct lwp_info *lwp);
221 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
222 static int kill_lwp (unsigned long lwpid, int signo);
224 /* True if the low target can hardware single-step. Such targets
225 don't need a BREAKPOINT_REINSERT_ADDR callback. */
228 can_hardware_single_step (void)
230 return (the_low_target.breakpoint_reinsert_addr == NULL);
233 /* True if the low target supports memory breakpoints. If so, we'll
234 have a GET_PC implementation. */
237 supports_breakpoints (void)
239 return (the_low_target.get_pc != NULL);
242 /* Returns true if this target can support fast tracepoints. This
243 does not mean that the in-process agent has been loaded in the
247 supports_fast_tracepoints (void)
249 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
252 /* True if LWP is stopped in its stepping range. */
255 lwp_in_step_range (struct lwp_info *lwp)
257 CORE_ADDR pc = lwp->stop_pc;
259 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
262 struct pending_signals
266 struct pending_signals *prev;
269 /* The read/write ends of the pipe registered as waitable file in the
271 static int linux_event_pipe[2] = { -1, -1 };
273 /* True if we're currently in async mode. */
274 #define target_is_async_p() (linux_event_pipe[0] != -1)
276 static void send_sigstop (struct lwp_info *lwp);
277 static void wait_for_sigstop (void);
279 /* Return non-zero if HEADER is a 64-bit ELF file. */
282 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
284 if (header->e_ident[EI_MAG0] == ELFMAG0
285 && header->e_ident[EI_MAG1] == ELFMAG1
286 && header->e_ident[EI_MAG2] == ELFMAG2
287 && header->e_ident[EI_MAG3] == ELFMAG3)
289 *machine = header->e_machine;
290 return header->e_ident[EI_CLASS] == ELFCLASS64;
297 /* Return non-zero if FILE is a 64-bit ELF file,
298 zero if the file is not a 64-bit ELF file,
299 and -1 if the file is not accessible or doesn't exist. */
302 elf_64_file_p (const char *file, unsigned int *machine)
307 fd = open (file, O_RDONLY);
311 if (read (fd, &header, sizeof (header)) != sizeof (header))
318 return elf_64_header_p (&header, machine);
321 /* Accepts an integer PID; Returns true if the executable PID is
322 running is a 64-bit ELF file.. */
325 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
329 sprintf (file, "/proc/%d/exe", pid);
330 return elf_64_file_p (file, machine);
334 delete_lwp (struct lwp_info *lwp)
336 struct thread_info *thr = get_lwp_thread (lwp);
339 debug_printf ("deleting %ld\n", lwpid_of (thr));
342 free (lwp->arch_private);
346 /* Add a process to the common process list, and set its private
349 static struct process_info *
350 linux_add_process (int pid, int attached)
352 struct process_info *proc;
354 proc = add_process (pid, attached);
355 proc->private = xcalloc (1, sizeof (*proc->private));
357 /* Set the arch when the first LWP stops. */
358 proc->private->new_inferior = 1;
360 if (the_low_target.new_process != NULL)
361 proc->private->arch_private = the_low_target.new_process ();
366 /* Handle a GNU/Linux extended wait response. If we see a clone
367 event, we need to add the new LWP to our list (and not report the
368 trap to higher layers). */
371 handle_extended_wait (struct lwp_info *event_child, int wstat)
373 int event = linux_ptrace_get_extended_event (wstat);
374 struct thread_info *event_thr = get_lwp_thread (event_child);
375 struct lwp_info *new_lwp;
377 if (event == PTRACE_EVENT_CLONE)
380 unsigned long new_pid;
383 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
386 /* If we haven't already seen the new PID stop, wait for it now. */
387 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
389 /* The new child has a pending SIGSTOP. We can't affect it until it
390 hits the SIGSTOP, but we're already attached. */
392 ret = my_waitpid (new_pid, &status, __WALL);
395 perror_with_name ("waiting for new child");
396 else if (ret != new_pid)
397 warning ("wait returned unexpected PID %d", ret);
398 else if (!WIFSTOPPED (status))
399 warning ("wait returned unexpected status 0x%x", status);
403 debug_printf ("HEW: Got clone event "
404 "from LWP %ld, new child is LWP %ld\n",
405 lwpid_of (event_thr), new_pid);
407 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
408 new_lwp = add_lwp (ptid);
410 /* Either we're going to immediately resume the new thread
411 or leave it stopped. linux_resume_one_lwp is a nop if it
412 thinks the thread is currently running, so set this first
413 before calling linux_resume_one_lwp. */
414 new_lwp->stopped = 1;
416 /* If we're suspending all threads, leave this one suspended
418 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
419 new_lwp->suspended = 1;
421 /* Normally we will get the pending SIGSTOP. But in some cases
422 we might get another signal delivered to the group first.
423 If we do get another signal, be sure not to lose it. */
424 if (WSTOPSIG (status) == SIGSTOP)
426 if (stopping_threads != NOT_STOPPING_THREADS)
427 new_lwp->stop_pc = get_stop_pc (new_lwp);
429 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
433 new_lwp->stop_expected = 1;
435 if (stopping_threads != NOT_STOPPING_THREADS)
437 new_lwp->stop_pc = get_stop_pc (new_lwp);
438 new_lwp->status_pending_p = 1;
439 new_lwp->status_pending = status;
442 /* Pass the signal on. This is what GDB does - except
443 shouldn't we really report it instead? */
444 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
447 /* Always resume the current thread. If we are stopping
448 threads, it will have a pending SIGSTOP; we may as well
450 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
454 /* Return the PC as read from the regcache of LWP, without any
458 get_pc (struct lwp_info *lwp)
460 struct thread_info *saved_thread;
461 struct regcache *regcache;
464 if (the_low_target.get_pc == NULL)
467 saved_thread = current_thread;
468 current_thread = get_lwp_thread (lwp);
470 regcache = get_thread_regcache (current_thread, 1);
471 pc = (*the_low_target.get_pc) (regcache);
474 debug_printf ("pc is 0x%lx\n", (long) pc);
476 current_thread = saved_thread;
480 /* This function should only be called if LWP got a SIGTRAP.
481 The SIGTRAP could mean several things.
483 On i386, where decr_pc_after_break is non-zero:
484 If we were single-stepping this process using PTRACE_SINGLESTEP,
485 we will get only the one SIGTRAP (even if the instruction we
486 stepped over was a breakpoint). The value of $eip will be the
488 If we continue the process using PTRACE_CONT, we will get a
489 SIGTRAP when we hit a breakpoint. The value of $eip will be
490 the instruction after the breakpoint (i.e. needs to be
491 decremented). If we report the SIGTRAP to GDB, we must also
492 report the undecremented PC. If we cancel the SIGTRAP, we
493 must resume at the decremented PC.
495 (Presumably, not yet tested) On a non-decr_pc_after_break machine
496 with hardware or kernel single-step:
497 If we single-step over a breakpoint instruction, our PC will
498 point at the following instruction. If we continue and hit a
499 breakpoint instruction, our PC will point at the breakpoint
503 get_stop_pc (struct lwp_info *lwp)
507 if (the_low_target.get_pc == NULL)
510 stop_pc = get_pc (lwp);
512 if (WSTOPSIG (lwp->last_status) == SIGTRAP
514 && !lwp->stopped_by_watchpoint
515 && !linux_is_extended_waitstatus (lwp->last_status))
516 stop_pc -= the_low_target.decr_pc_after_break;
519 debug_printf ("stop pc is 0x%lx\n", (long) stop_pc);
524 static struct lwp_info *
525 add_lwp (ptid_t ptid)
527 struct lwp_info *lwp;
529 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
530 memset (lwp, 0, sizeof (*lwp));
532 if (the_low_target.new_thread != NULL)
533 lwp->arch_private = the_low_target.new_thread ();
535 lwp->thread = add_thread (ptid, lwp);
540 /* Start an inferior process and returns its pid.
541 ALLARGS is a vector of program-name and args. */
544 linux_create_inferior (char *program, char **allargs)
546 #ifdef HAVE_PERSONALITY
547 int personality_orig = 0, personality_set = 0;
549 struct lwp_info *new_lwp;
553 #ifdef HAVE_PERSONALITY
554 if (disable_randomization)
557 personality_orig = personality (0xffffffff);
558 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
561 personality (personality_orig | ADDR_NO_RANDOMIZE);
563 if (errno != 0 || (personality_set
564 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
565 warning ("Error disabling address space randomization: %s",
570 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
576 perror_with_name ("fork");
581 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
583 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
584 signal (__SIGRTMIN + 1, SIG_DFL);
589 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
590 stdout to stderr so that inferior i/o doesn't corrupt the connection.
591 Also, redirect stdin to /dev/null. */
592 if (remote_connection_is_stdio ())
595 open ("/dev/null", O_RDONLY);
597 if (write (2, "stdin/stdout redirected\n",
598 sizeof ("stdin/stdout redirected\n") - 1) < 0)
600 /* Errors ignored. */;
604 execv (program, allargs);
606 execvp (program, allargs);
608 fprintf (stderr, "Cannot exec %s: %s.\n", program,
614 #ifdef HAVE_PERSONALITY
618 personality (personality_orig);
620 warning ("Error restoring address space randomization: %s",
625 linux_add_process (pid, 0);
627 ptid = ptid_build (pid, pid, 0);
628 new_lwp = add_lwp (ptid);
629 new_lwp->must_set_ptrace_flags = 1;
635 linux_attach_fail_reason_string (ptid_t ptid, int err)
637 static char *reason_string;
638 struct buffer buffer;
640 long lwpid = ptid_get_lwp (ptid);
642 xfree (reason_string);
644 buffer_init (&buffer);
645 linux_ptrace_attach_fail_reason (lwpid, &buffer);
646 buffer_grow_str0 (&buffer, "");
647 warnings = buffer_finish (&buffer);
648 if (warnings[0] != '\0')
649 reason_string = xstrprintf ("%s (%d), %s",
650 strerror (err), err, warnings);
652 reason_string = xstrprintf ("%s (%d)",
653 strerror (err), err);
655 return reason_string;
658 /* Attach to an inferior process. */
661 linux_attach_lwp (ptid_t ptid)
663 struct lwp_info *new_lwp;
664 int lwpid = ptid_get_lwp (ptid);
666 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
670 new_lwp = add_lwp (ptid);
672 /* We need to wait for SIGSTOP before being able to make the next
673 ptrace call on this LWP. */
674 new_lwp->must_set_ptrace_flags = 1;
676 if (linux_proc_pid_is_stopped (lwpid))
679 debug_printf ("Attached to a stopped process\n");
681 /* The process is definitely stopped. It is in a job control
682 stop, unless the kernel predates the TASK_STOPPED /
683 TASK_TRACED distinction, in which case it might be in a
684 ptrace stop. Make sure it is in a ptrace stop; from there we
685 can kill it, signal it, et cetera.
687 First make sure there is a pending SIGSTOP. Since we are
688 already attached, the process can not transition from stopped
689 to running without a PTRACE_CONT; so we know this signal will
690 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
691 probably already in the queue (unless this kernel is old
692 enough to use TASK_STOPPED for ptrace stops); but since
693 SIGSTOP is not an RT signal, it can only be queued once. */
694 kill_lwp (lwpid, SIGSTOP);
696 /* Finally, resume the stopped process. This will deliver the
697 SIGSTOP (or a higher priority signal, just like normal
698 PTRACE_ATTACH), which we'll catch later on. */
699 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
702 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
705 There are several cases to consider here:
707 1) gdbserver has already attached to the process and is being notified
708 of a new thread that is being created.
709 In this case we should ignore that SIGSTOP and resume the
710 process. This is handled below by setting stop_expected = 1,
711 and the fact that add_thread sets last_resume_kind ==
714 2) This is the first thread (the process thread), and we're attaching
715 to it via attach_inferior.
716 In this case we want the process thread to stop.
717 This is handled by having linux_attach set last_resume_kind ==
718 resume_stop after we return.
720 If the pid we are attaching to is also the tgid, we attach to and
721 stop all the existing threads. Otherwise, we attach to pid and
722 ignore any other threads in the same group as this pid.
724 3) GDB is connecting to gdbserver and is requesting an enumeration of all
726 In this case we want the thread to stop.
727 FIXME: This case is currently not properly handled.
728 We should wait for the SIGSTOP but don't. Things work apparently
729 because enough time passes between when we ptrace (ATTACH) and when
730 gdb makes the next ptrace call on the thread.
732 On the other hand, if we are currently trying to stop all threads, we
733 should treat the new thread as if we had sent it a SIGSTOP. This works
734 because we are guaranteed that the add_lwp call above added us to the
735 end of the list, and so the new thread has not yet reached
736 wait_for_sigstop (but will). */
737 new_lwp->stop_expected = 1;
742 /* Attach to PID. If PID is the tgid, attach to it and all
746 linux_attach (unsigned long pid)
748 ptid_t ptid = ptid_build (pid, pid, 0);
751 /* Attach to PID. We will check for other threads
753 err = linux_attach_lwp (ptid);
755 error ("Cannot attach to process %ld: %s",
756 pid, linux_attach_fail_reason_string (ptid, err));
758 linux_add_process (pid, 1);
762 struct thread_info *thread;
764 /* Don't ignore the initial SIGSTOP if we just attached to this
765 process. It will be collected by wait shortly. */
766 thread = find_thread_ptid (ptid_build (pid, pid, 0));
767 thread->last_resume_kind = resume_stop;
770 if (linux_proc_get_tgid (pid) == pid)
775 sprintf (pathname, "/proc/%ld/task", pid);
777 dir = opendir (pathname);
781 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
786 /* At this point we attached to the tgid. Scan the task for
788 int new_threads_found;
791 while (iterations < 2)
795 new_threads_found = 0;
796 /* Add all the other threads. While we go through the
797 threads, new threads may be spawned. Cycle through
798 the list of threads until we have done two iterations without
799 finding new threads. */
800 while ((dp = readdir (dir)) != NULL)
806 lwp = strtoul (dp->d_name, NULL, 10);
808 ptid = ptid_build (pid, lwp, 0);
810 /* Is this a new thread? */
811 if (lwp != 0 && find_thread_ptid (ptid) == NULL)
816 debug_printf ("Found new lwp %ld\n", lwp);
818 err = linux_attach_lwp (ptid);
820 warning ("Cannot attach to lwp %ld: %s",
822 linux_attach_fail_reason_string (ptid, err));
828 if (!new_threads_found)
849 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
851 struct counter *counter = args;
853 if (ptid_get_pid (entry->id) == counter->pid)
855 if (++counter->count > 1)
863 last_thread_of_process_p (int pid)
865 struct counter counter = { pid , 0 };
867 return (find_inferior (&all_threads,
868 second_thread_of_pid_p, &counter) == NULL);
874 linux_kill_one_lwp (struct lwp_info *lwp)
876 struct thread_info *thr = get_lwp_thread (lwp);
877 int pid = lwpid_of (thr);
879 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
880 there is no signal context, and ptrace(PTRACE_KILL) (or
881 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
882 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
883 alternative is to kill with SIGKILL. We only need one SIGKILL
884 per process, not one for each thread. But since we still support
885 linuxthreads, and we also support debugging programs using raw
886 clone without CLONE_THREAD, we send one for each thread. For
887 years, we used PTRACE_KILL only, so we're being a bit paranoid
888 about some old kernels where PTRACE_KILL might work better
889 (dubious if there are any such, but that's why it's paranoia), so
890 we try SIGKILL first, PTRACE_KILL second, and so we're fine
894 kill_lwp (pid, SIGKILL);
897 int save_errno = errno;
899 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
900 target_pid_to_str (ptid_of (thr)),
901 save_errno ? strerror (save_errno) : "OK");
905 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
908 int save_errno = errno;
910 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
911 target_pid_to_str (ptid_of (thr)),
912 save_errno ? strerror (save_errno) : "OK");
916 /* Kill LWP and wait for it to die. */
919 kill_wait_lwp (struct lwp_info *lwp)
921 struct thread_info *thr = get_lwp_thread (lwp);
922 int pid = ptid_get_pid (ptid_of (thr));
923 int lwpid = ptid_get_lwp (ptid_of (thr));
928 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
932 linux_kill_one_lwp (lwp);
934 /* Make sure it died. Notes:
936 - The loop is most likely unnecessary.
938 - We don't use linux_wait_for_event as that could delete lwps
939 while we're iterating over them. We're not interested in
940 any pending status at this point, only in making sure all
941 wait status on the kernel side are collected until the
944 - We don't use __WALL here as the __WALL emulation relies on
945 SIGCHLD, and killing a stopped process doesn't generate
946 one, nor an exit status.
948 res = my_waitpid (lwpid, &wstat, 0);
949 if (res == -1 && errno == ECHILD)
950 res = my_waitpid (lwpid, &wstat, __WCLONE);
951 } while (res > 0 && WIFSTOPPED (wstat));
953 gdb_assert (res > 0);
956 /* Callback for `find_inferior'. Kills an lwp of a given process,
957 except the leader. */
960 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
962 struct thread_info *thread = (struct thread_info *) entry;
963 struct lwp_info *lwp = get_thread_lwp (thread);
964 int pid = * (int *) args;
966 if (ptid_get_pid (entry->id) != pid)
969 /* We avoid killing the first thread here, because of a Linux kernel (at
970 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
971 the children get a chance to be reaped, it will remain a zombie
974 if (lwpid_of (thread) == pid)
977 debug_printf ("lkop: is last of process %s\n",
978 target_pid_to_str (entry->id));
989 struct process_info *process;
990 struct lwp_info *lwp;
992 process = find_process_pid (pid);
996 /* If we're killing a running inferior, make sure it is stopped
997 first, as PTRACE_KILL will not work otherwise. */
998 stop_all_lwps (0, NULL);
1000 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1002 /* See the comment in linux_kill_one_lwp. We did not kill the first
1003 thread in the list, so do so now. */
1004 lwp = find_lwp_pid (pid_to_ptid (pid));
1009 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1013 kill_wait_lwp (lwp);
1015 the_target->mourn (process);
1017 /* Since we presently can only stop all lwps of all processes, we
1018 need to unstop lwps of other processes. */
1019 unstop_all_lwps (0, NULL);
1023 /* Get pending signal of THREAD, for detaching purposes. This is the
1024 signal the thread last stopped for, which we need to deliver to the
1025 thread when detaching, otherwise, it'd be suppressed/lost. */
1028 get_detach_signal (struct thread_info *thread)
1030 enum gdb_signal signo = GDB_SIGNAL_0;
1032 struct lwp_info *lp = get_thread_lwp (thread);
1034 if (lp->status_pending_p)
1035 status = lp->status_pending;
1038 /* If the thread had been suspended by gdbserver, and it stopped
1039 cleanly, then it'll have stopped with SIGSTOP. But we don't
1040 want to deliver that SIGSTOP. */
1041 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1042 || thread->last_status.value.sig == GDB_SIGNAL_0)
1045 /* Otherwise, we may need to deliver the signal we
1047 status = lp->last_status;
1050 if (!WIFSTOPPED (status))
1053 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1054 target_pid_to_str (ptid_of (thread)));
1058 /* Extended wait statuses aren't real SIGTRAPs. */
1059 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1062 debug_printf ("GPS: lwp %s had stopped with extended "
1063 "status: no pending signal\n",
1064 target_pid_to_str (ptid_of (thread)));
1068 signo = gdb_signal_from_host (WSTOPSIG (status));
1070 if (program_signals_p && !program_signals[signo])
1073 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1074 target_pid_to_str (ptid_of (thread)),
1075 gdb_signal_to_string (signo));
1078 else if (!program_signals_p
1079 /* If we have no way to know which signals GDB does not
1080 want to have passed to the program, assume
1081 SIGTRAP/SIGINT, which is GDB's default. */
1082 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1085 debug_printf ("GPS: lwp %s had signal %s, "
1086 "but we don't know if we should pass it. "
1087 "Default to not.\n",
1088 target_pid_to_str (ptid_of (thread)),
1089 gdb_signal_to_string (signo));
1095 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1096 target_pid_to_str (ptid_of (thread)),
1097 gdb_signal_to_string (signo));
1099 return WSTOPSIG (status);
1104 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1106 struct thread_info *thread = (struct thread_info *) entry;
1107 struct lwp_info *lwp = get_thread_lwp (thread);
1108 int pid = * (int *) args;
1111 if (ptid_get_pid (entry->id) != pid)
1114 /* If there is a pending SIGSTOP, get rid of it. */
1115 if (lwp->stop_expected)
1118 debug_printf ("Sending SIGCONT to %s\n",
1119 target_pid_to_str (ptid_of (thread)));
1121 kill_lwp (lwpid_of (thread), SIGCONT);
1122 lwp->stop_expected = 0;
1125 /* Flush any pending changes to the process's registers. */
1126 regcache_invalidate_thread (thread);
1128 /* Pass on any pending signal for this thread. */
1129 sig = get_detach_signal (thread);
1131 /* Finally, let it resume. */
1132 if (the_low_target.prepare_to_resume != NULL)
1133 the_low_target.prepare_to_resume (lwp);
1134 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1135 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1136 error (_("Can't detach %s: %s"),
1137 target_pid_to_str (ptid_of (thread)),
1145 linux_detach (int pid)
1147 struct process_info *process;
1149 process = find_process_pid (pid);
1150 if (process == NULL)
1153 /* Stop all threads before detaching. First, ptrace requires that
1154 the thread is stopped to sucessfully detach. Second, thread_db
1155 may need to uninstall thread event breakpoints from memory, which
1156 only works with a stopped process anyway. */
1157 stop_all_lwps (0, NULL);
1159 #ifdef USE_THREAD_DB
1160 thread_db_detach (process);
1163 /* Stabilize threads (move out of jump pads). */
1164 stabilize_threads ();
1166 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1168 the_target->mourn (process);
1170 /* Since we presently can only stop all lwps of all processes, we
1171 need to unstop lwps of other processes. */
1172 unstop_all_lwps (0, NULL);
1176 /* Remove all LWPs that belong to process PROC from the lwp list. */
1179 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1181 struct thread_info *thread = (struct thread_info *) entry;
1182 struct lwp_info *lwp = get_thread_lwp (thread);
1183 struct process_info *process = proc;
1185 if (pid_of (thread) == pid_of (process))
1192 linux_mourn (struct process_info *process)
1194 struct process_info_private *priv;
1196 #ifdef USE_THREAD_DB
1197 thread_db_mourn (process);
1200 find_inferior (&all_threads, delete_lwp_callback, process);
1202 /* Freeing all private data. */
1203 priv = process->private;
1204 free (priv->arch_private);
1206 process->private = NULL;
1208 remove_process (process);
1212 linux_join (int pid)
1217 ret = my_waitpid (pid, &status, 0);
1218 if (WIFEXITED (status) || WIFSIGNALED (status))
1220 } while (ret != -1 || errno != ECHILD);
1223 /* Return nonzero if the given thread is still alive. */
1225 linux_thread_alive (ptid_t ptid)
1227 struct lwp_info *lwp = find_lwp_pid (ptid);
1229 /* We assume we always know if a thread exits. If a whole process
1230 exited but we still haven't been able to report it to GDB, we'll
1231 hold on to the last lwp of the dead process. */
1238 /* Return 1 if this lwp has an interesting status pending. */
1240 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1242 struct thread_info *thread = (struct thread_info *) entry;
1243 struct lwp_info *lwp = get_thread_lwp (thread);
1244 ptid_t ptid = * (ptid_t *) arg;
1246 /* Check if we're only interested in events from a specific process
1248 if (!ptid_equal (minus_one_ptid, ptid)
1249 && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
1252 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1253 report any status pending the LWP may have. */
1254 if (thread->last_resume_kind == resume_stop
1255 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1258 return lwp->status_pending_p;
1262 same_lwp (struct inferior_list_entry *entry, void *data)
1264 ptid_t ptid = *(ptid_t *) data;
1267 if (ptid_get_lwp (ptid) != 0)
1268 lwp = ptid_get_lwp (ptid);
1270 lwp = ptid_get_pid (ptid);
1272 if (ptid_get_lwp (entry->id) == lwp)
1279 find_lwp_pid (ptid_t ptid)
1281 struct inferior_list_entry *thread
1282 = find_inferior (&all_threads, same_lwp, &ptid);
1287 return get_thread_lwp ((struct thread_info *) thread);
1290 /* Return the number of known LWPs in the tgid given by PID. */
1295 struct inferior_list_entry *inf, *tmp;
1298 ALL_INFERIORS (&all_threads, inf, tmp)
1300 if (ptid_get_pid (inf->id) == pid)
1307 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1308 their exits until all other threads in the group have exited. */
1311 check_zombie_leaders (void)
1313 struct process_info *proc, *tmp;
1315 ALL_PROCESSES (proc, tmp)
1317 pid_t leader_pid = pid_of (proc);
1318 struct lwp_info *leader_lp;
1320 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1323 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1324 "num_lwps=%d, zombie=%d\n",
1325 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1326 linux_proc_pid_is_zombie (leader_pid));
1328 if (leader_lp != NULL
1329 /* Check if there are other threads in the group, as we may
1330 have raced with the inferior simply exiting. */
1331 && !last_thread_of_process_p (leader_pid)
1332 && linux_proc_pid_is_zombie (leader_pid))
1334 /* A leader zombie can mean one of two things:
1336 - It exited, and there's an exit status pending
1337 available, or only the leader exited (not the whole
1338 program). In the latter case, we can't waitpid the
1339 leader's exit status until all other threads are gone.
1341 - There are 3 or more threads in the group, and a thread
1342 other than the leader exec'd. On an exec, the Linux
1343 kernel destroys all other threads (except the execing
1344 one) in the thread group, and resets the execing thread's
1345 tid to the tgid. No exit notification is sent for the
1346 execing thread -- from the ptracer's perspective, it
1347 appears as though the execing thread just vanishes.
1348 Until we reap all other threads except the leader and the
1349 execing thread, the leader will be zombie, and the
1350 execing thread will be in `D (disc sleep)'. As soon as
1351 all other threads are reaped, the execing thread changes
1352 it's tid to the tgid, and the previous (zombie) leader
1353 vanishes, giving place to the "new" leader. We could try
1354 distinguishing the exit and exec cases, by waiting once
1355 more, and seeing if something comes out, but it doesn't
1356 sound useful. The previous leader _does_ go away, and
1357 we'll re-add the new one once we see the exec event
1358 (which is just the same as what would happen if the
1359 previous leader did exit voluntarily before some other
1364 "CZL: Thread group leader %d zombie "
1365 "(it exited, or another thread execd).\n",
1368 delete_lwp (leader_lp);
1373 /* Callback for `find_inferior'. Returns the first LWP that is not
1374 stopped. ARG is a PTID filter. */
1377 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1379 struct thread_info *thr = (struct thread_info *) entry;
1380 struct lwp_info *lwp;
1381 ptid_t filter = *(ptid_t *) arg;
1383 if (!ptid_match (ptid_of (thr), filter))
1386 lwp = get_thread_lwp (thr);
1393 /* This function should only be called if the LWP got a SIGTRAP.
1395 Handle any tracepoint steps or hits. Return true if a tracepoint
1396 event was handled, 0 otherwise. */
1399 handle_tracepoints (struct lwp_info *lwp)
1401 struct thread_info *tinfo = get_lwp_thread (lwp);
1402 int tpoint_related_event = 0;
1404 /* If this tracepoint hit causes a tracing stop, we'll immediately
1405 uninsert tracepoints. To do this, we temporarily pause all
1406 threads, unpatch away, and then unpause threads. We need to make
1407 sure the unpausing doesn't resume LWP too. */
1410 /* And we need to be sure that any all-threads-stopping doesn't try
1411 to move threads out of the jump pads, as it could deadlock the
1412 inferior (LWP could be in the jump pad, maybe even holding the
1415 /* Do any necessary step collect actions. */
1416 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1418 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1420 /* See if we just hit a tracepoint and do its main collect
1422 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1426 gdb_assert (lwp->suspended == 0);
1427 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1429 if (tpoint_related_event)
1432 debug_printf ("got a tracepoint event\n");
1439 /* Convenience wrapper. Returns true if LWP is presently collecting a
1443 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1444 struct fast_tpoint_collect_status *status)
1446 CORE_ADDR thread_area;
1447 struct thread_info *thread = get_lwp_thread (lwp);
1449 if (the_low_target.get_thread_area == NULL)
1452 /* Get the thread area address. This is used to recognize which
1453 thread is which when tracing with the in-process agent library.
1454 We don't read anything from the address, and treat it as opaque;
1455 it's the address itself that we assume is unique per-thread. */
1456 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1459 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1462 /* The reason we resume in the caller, is because we want to be able
1463 to pass lwp->status_pending as WSTAT, and we need to clear
1464 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1465 refuses to resume. */
1468 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1470 struct thread_info *saved_thread;
1472 saved_thread = current_thread;
1473 current_thread = get_lwp_thread (lwp);
1476 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1477 && supports_fast_tracepoints ()
1478 && agent_loaded_p ())
1480 struct fast_tpoint_collect_status status;
1484 debug_printf ("Checking whether LWP %ld needs to move out of the "
1486 lwpid_of (current_thread));
1488 r = linux_fast_tracepoint_collecting (lwp, &status);
1491 || (WSTOPSIG (*wstat) != SIGILL
1492 && WSTOPSIG (*wstat) != SIGFPE
1493 && WSTOPSIG (*wstat) != SIGSEGV
1494 && WSTOPSIG (*wstat) != SIGBUS))
1496 lwp->collecting_fast_tracepoint = r;
1500 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1502 /* Haven't executed the original instruction yet.
1503 Set breakpoint there, and wait till it's hit,
1504 then single-step until exiting the jump pad. */
1505 lwp->exit_jump_pad_bkpt
1506 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1510 debug_printf ("Checking whether LWP %ld needs to move out of "
1511 "the jump pad...it does\n",
1512 lwpid_of (current_thread));
1513 current_thread = saved_thread;
1520 /* If we get a synchronous signal while collecting, *and*
1521 while executing the (relocated) original instruction,
1522 reset the PC to point at the tpoint address, before
1523 reporting to GDB. Otherwise, it's an IPA lib bug: just
1524 report the signal to GDB, and pray for the best. */
1526 lwp->collecting_fast_tracepoint = 0;
1529 && (status.adjusted_insn_addr <= lwp->stop_pc
1530 && lwp->stop_pc < status.adjusted_insn_addr_end))
1533 struct regcache *regcache;
1535 /* The si_addr on a few signals references the address
1536 of the faulting instruction. Adjust that as
1538 if ((WSTOPSIG (*wstat) == SIGILL
1539 || WSTOPSIG (*wstat) == SIGFPE
1540 || WSTOPSIG (*wstat) == SIGBUS
1541 || WSTOPSIG (*wstat) == SIGSEGV)
1542 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1543 (PTRACE_TYPE_ARG3) 0, &info) == 0
1544 /* Final check just to make sure we don't clobber
1545 the siginfo of non-kernel-sent signals. */
1546 && (uintptr_t) info.si_addr == lwp->stop_pc)
1548 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1549 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1550 (PTRACE_TYPE_ARG3) 0, &info);
1553 regcache = get_thread_regcache (current_thread, 1);
1554 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1555 lwp->stop_pc = status.tpoint_addr;
1557 /* Cancel any fast tracepoint lock this thread was
1559 force_unlock_trace_buffer ();
1562 if (lwp->exit_jump_pad_bkpt != NULL)
1565 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1566 "stopping all threads momentarily.\n");
1568 stop_all_lwps (1, lwp);
1569 cancel_breakpoints ();
1571 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1572 lwp->exit_jump_pad_bkpt = NULL;
1574 unstop_all_lwps (1, lwp);
1576 gdb_assert (lwp->suspended >= 0);
1582 debug_printf ("Checking whether LWP %ld needs to move out of the "
1584 lwpid_of (current_thread));
1586 current_thread = saved_thread;
1590 /* Enqueue one signal in the "signals to report later when out of the
1594 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1596 struct pending_signals *p_sig;
1597 struct thread_info *thread = get_lwp_thread (lwp);
1600 debug_printf ("Deferring signal %d for LWP %ld.\n",
1601 WSTOPSIG (*wstat), lwpid_of (thread));
1605 struct pending_signals *sig;
1607 for (sig = lwp->pending_signals_to_report;
1610 debug_printf (" Already queued %d\n",
1613 debug_printf (" (no more currently queued signals)\n");
1616 /* Don't enqueue non-RT signals if they are already in the deferred
1617 queue. (SIGSTOP being the easiest signal to see ending up here
1619 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1621 struct pending_signals *sig;
1623 for (sig = lwp->pending_signals_to_report;
1627 if (sig->signal == WSTOPSIG (*wstat))
1630 debug_printf ("Not requeuing already queued non-RT signal %d"
1639 p_sig = xmalloc (sizeof (*p_sig));
1640 p_sig->prev = lwp->pending_signals_to_report;
1641 p_sig->signal = WSTOPSIG (*wstat);
1642 memset (&p_sig->info, 0, sizeof (siginfo_t));
1643 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1646 lwp->pending_signals_to_report = p_sig;
1649 /* Dequeue one signal from the "signals to report later when out of
1650 the jump pad" list. */
1653 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1655 struct thread_info *thread = get_lwp_thread (lwp);
1657 if (lwp->pending_signals_to_report != NULL)
1659 struct pending_signals **p_sig;
1661 p_sig = &lwp->pending_signals_to_report;
1662 while ((*p_sig)->prev != NULL)
1663 p_sig = &(*p_sig)->prev;
1665 *wstat = W_STOPCODE ((*p_sig)->signal);
1666 if ((*p_sig)->info.si_signo != 0)
1667 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1673 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1674 WSTOPSIG (*wstat), lwpid_of (thread));
1678 struct pending_signals *sig;
1680 for (sig = lwp->pending_signals_to_report;
1683 debug_printf (" Still queued %d\n",
1686 debug_printf (" (no more queued signals)\n");
1695 /* Arrange for a breakpoint to be hit again later. We don't keep the
1696 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1697 will handle the current event, eventually we will resume this LWP,
1698 and this breakpoint will trap again. */
1701 cancel_breakpoint (struct lwp_info *lwp)
1703 struct thread_info *saved_thread;
1705 /* There's nothing to do if we don't support breakpoints. */
1706 if (!supports_breakpoints ())
1709 /* breakpoint_at reads from current inferior. */
1710 saved_thread = current_thread;
1711 current_thread = get_lwp_thread (lwp);
1713 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1716 debug_printf ("CB: Push back breakpoint for %s\n",
1717 target_pid_to_str (ptid_of (current_thread)));
1719 /* Back up the PC if necessary. */
1720 if (the_low_target.decr_pc_after_break)
1722 struct regcache *regcache
1723 = get_thread_regcache (current_thread, 1);
1724 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1727 current_thread = saved_thread;
1733 debug_printf ("CB: No breakpoint found at %s for [%s]\n",
1734 paddress (lwp->stop_pc),
1735 target_pid_to_str (ptid_of (current_thread)));
1738 current_thread = saved_thread;
1742 /* Return true if the event in LP may be caused by breakpoint. */
1745 lp_status_maybe_breakpoint (struct lwp_info *lp)
1747 return (lp->status_pending_p
1748 && WIFSTOPPED (lp->status_pending)
1749 && (WSTOPSIG (lp->status_pending) == SIGTRAP
1750 /* SIGILL and SIGSEGV are also treated as traps in case a
1751 breakpoint is inserted at the current PC. */
1752 || WSTOPSIG (lp->status_pending) == SIGILL
1753 || WSTOPSIG (lp->status_pending) == SIGSEGV));
1756 /* Do low-level handling of the event, and check if we should go on
1757 and pass it to caller code. Return the affected lwp if we are, or
1760 static struct lwp_info *
1761 linux_low_filter_event (ptid_t filter_ptid, int lwpid, int wstat)
1763 struct lwp_info *child;
1764 struct thread_info *thread;
1766 child = find_lwp_pid (pid_to_ptid (lwpid));
1768 /* If we didn't find a process, one of two things presumably happened:
1769 - A process we started and then detached from has exited. Ignore it.
1770 - A process we are controlling has forked and the new child's stop
1771 was reported to us by the kernel. Save its PID. */
1772 if (child == NULL && WIFSTOPPED (wstat))
1774 add_to_pid_list (&stopped_pids, lwpid, wstat);
1777 else if (child == NULL)
1780 thread = get_lwp_thread (child);
1784 child->last_status = wstat;
1786 if (WIFSTOPPED (wstat))
1788 struct process_info *proc;
1790 /* Architecture-specific setup after inferior is running. This
1791 needs to happen after we have attached to the inferior and it
1792 is stopped for the first time, but before we access any
1793 inferior registers. */
1794 proc = find_process_pid (pid_of (thread));
1795 if (proc->private->new_inferior)
1797 struct thread_info *saved_thread;
1799 saved_thread = current_thread;
1800 current_thread = thread;
1802 the_low_target.arch_setup ();
1804 current_thread = saved_thread;
1806 proc->private->new_inferior = 0;
1810 /* Store the STOP_PC, with adjustment applied. This depends on the
1811 architecture being defined already (so that CHILD has a valid
1812 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1814 if (WIFSTOPPED (wstat))
1817 && the_low_target.get_pc != NULL)
1819 struct thread_info *saved_thread;
1820 struct regcache *regcache;
1823 saved_thread = current_thread;
1824 current_thread = thread;
1825 regcache = get_thread_regcache (current_thread, 1);
1826 pc = (*the_low_target.get_pc) (regcache);
1827 debug_printf ("linux_low_filter_event: pc is 0x%lx\n", (long) pc);
1828 current_thread = saved_thread;
1831 child->stop_pc = get_stop_pc (child);
1834 /* Fetch the possibly triggered data watchpoint info and store it in
1837 On some archs, like x86, that use debug registers to set
1838 watchpoints, it's possible that the way to know which watched
1839 address trapped, is to check the register that is used to select
1840 which address to watch. Problem is, between setting the
1841 watchpoint and reading back which data address trapped, the user
1842 may change the set of watchpoints, and, as a consequence, GDB
1843 changes the debug registers in the inferior. To avoid reading
1844 back a stale stopped-data-address when that happens, we cache in
1845 LP the fact that a watchpoint trapped, and the corresponding data
1846 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1847 changes the debug registers meanwhile, we have the cached data we
1850 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
1852 if (the_low_target.stopped_by_watchpoint == NULL)
1854 child->stopped_by_watchpoint = 0;
1858 struct thread_info *saved_thread;
1860 saved_thread = current_thread;
1861 current_thread = thread;
1863 child->stopped_by_watchpoint
1864 = the_low_target.stopped_by_watchpoint ();
1866 if (child->stopped_by_watchpoint)
1868 if (the_low_target.stopped_data_address != NULL)
1869 child->stopped_data_address
1870 = the_low_target.stopped_data_address ();
1872 child->stopped_data_address = 0;
1875 current_thread = saved_thread;
1879 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1881 linux_enable_event_reporting (lwpid);
1882 child->must_set_ptrace_flags = 0;
1885 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1886 && linux_is_extended_waitstatus (wstat))
1888 handle_extended_wait (child, wstat);
1892 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1893 && child->stop_expected)
1896 debug_printf ("Expected stop.\n");
1897 child->stop_expected = 0;
1899 if (thread->last_resume_kind == resume_stop)
1901 /* We want to report the stop to the core. Treat the
1902 SIGSTOP as a normal event. */
1904 else if (stopping_threads != NOT_STOPPING_THREADS)
1906 /* Stopping threads. We don't want this SIGSTOP to end up
1907 pending in the FILTER_PTID handling below. */
1912 /* Filter out the event. */
1913 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1918 /* Check if the thread has exited. */
1919 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat))
1920 && num_lwps (pid_of (thread)) > 1)
1923 debug_printf ("LLW: %d exited.\n", lwpid);
1925 /* If there is at least one more LWP, then the exit signal
1926 was not the end of the debugged application and should be
1932 if (!ptid_match (ptid_of (thread), filter_ptid))
1935 debug_printf ("LWP %d got an event %06x, leaving pending.\n",
1938 if (WIFSTOPPED (wstat))
1940 child->status_pending_p = 1;
1941 child->status_pending = wstat;
1943 if (WSTOPSIG (wstat) != SIGSTOP)
1945 /* Cancel breakpoint hits. The breakpoint may be
1946 removed before we fetch events from this process to
1947 report to the core. It is best not to assume the
1948 moribund breakpoints heuristic always handles these
1949 cases --- it could be too many events go through to
1950 the core before this one is handled. All-stop always
1951 cancels breakpoint hits in all threads. */
1953 && lp_status_maybe_breakpoint (child)
1954 && cancel_breakpoint (child))
1956 /* Throw away the SIGTRAP. */
1957 child->status_pending_p = 0;
1960 debug_printf ("LLW: LWP %d hit a breakpoint while"
1961 " waiting for another process;"
1962 " cancelled it\n", lwpid);
1966 else if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
1969 debug_printf ("LLWE: process %d exited while fetching "
1970 "event from another LWP\n", lwpid);
1972 /* This was the last lwp in the process. Since events are
1973 serialized to GDB core, and we can't report this one
1974 right now, but GDB core and the other target layers will
1975 want to be notified about the exit code/signal, leave the
1976 status pending for the next time we're able to report
1978 mark_lwp_dead (child, wstat);
1987 /* When the event-loop is doing a step-over, this points at the thread
1989 ptid_t step_over_bkpt;
1991 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1992 match FILTER_PTID (leaving others pending). The PTIDs can be:
1993 minus_one_ptid, to specify any child; a pid PTID, specifying all
1994 lwps of a thread group; or a PTID representing a single lwp. Store
1995 the stop status through the status pointer WSTAT. OPTIONS is
1996 passed to the waitpid call. Return 0 if no event was found and
1997 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1998 was found. Return the PID of the stopped child otherwise. */
2001 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2002 int *wstatp, int options)
2004 struct thread_info *event_thread;
2005 struct lwp_info *event_child, *requested_child;
2006 sigset_t block_mask, prev_mask;
2009 /* N.B. event_thread points to the thread_info struct that contains
2010 event_child. Keep them in sync. */
2011 event_thread = NULL;
2013 requested_child = NULL;
2015 /* Check for a lwp with a pending status. */
2017 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2019 event_thread = (struct thread_info *)
2020 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2021 if (event_thread != NULL)
2022 event_child = get_thread_lwp (event_thread);
2023 if (debug_threads && event_thread)
2024 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2026 else if (!ptid_equal (filter_ptid, null_ptid))
2028 requested_child = find_lwp_pid (filter_ptid);
2030 if (stopping_threads == NOT_STOPPING_THREADS
2031 && requested_child->status_pending_p
2032 && requested_child->collecting_fast_tracepoint)
2034 enqueue_one_deferred_signal (requested_child,
2035 &requested_child->status_pending);
2036 requested_child->status_pending_p = 0;
2037 requested_child->status_pending = 0;
2038 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2041 if (requested_child->suspended
2042 && requested_child->status_pending_p)
2044 internal_error (__FILE__, __LINE__,
2045 "requesting an event out of a"
2046 " suspended child?");
2049 if (requested_child->status_pending_p)
2051 event_child = requested_child;
2052 event_thread = get_lwp_thread (event_child);
2056 if (event_child != NULL)
2059 debug_printf ("Got an event from pending child %ld (%04x)\n",
2060 lwpid_of (event_thread), event_child->status_pending);
2061 *wstatp = event_child->status_pending;
2062 event_child->status_pending_p = 0;
2063 event_child->status_pending = 0;
2064 current_thread = event_thread;
2065 return lwpid_of (event_thread);
2068 /* But if we don't find a pending event, we'll have to wait.
2070 We only enter this loop if no process has a pending wait status.
2071 Thus any action taken in response to a wait status inside this
2072 loop is responding as soon as we detect the status, not after any
2075 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2076 all signals while here. */
2077 sigfillset (&block_mask);
2078 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2080 while (event_child == NULL)
2084 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2087 - If the thread group leader exits while other threads in the
2088 thread group still exist, waitpid(TGID, ...) hangs. That
2089 waitpid won't return an exit status until the other threads
2090 in the group are reaped.
2092 - When a non-leader thread execs, that thread just vanishes
2093 without reporting an exit (so we'd hang if we waited for it
2094 explicitly in that case). The exec event is reported to
2095 the TGID pid (although we don't currently enable exec
2098 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2101 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2102 ret, errno ? strerror (errno) : "ERRNO-OK");
2108 debug_printf ("LLW: waitpid %ld received %s\n",
2109 (long) ret, status_to_str (*wstatp));
2112 event_child = linux_low_filter_event (filter_ptid,
2114 if (event_child != NULL)
2116 /* We got an event to report to the core. */
2117 event_thread = get_lwp_thread (event_child);
2121 /* Retry until nothing comes out of waitpid. A single
2122 SIGCHLD can indicate more than one child stopped. */
2126 /* Check for zombie thread group leaders. Those can't be reaped
2127 until all other threads in the thread group are. */
2128 check_zombie_leaders ();
2130 /* If there are no resumed children left in the set of LWPs we
2131 want to wait for, bail. We can't just block in
2132 waitpid/sigsuspend, because lwps might have been left stopped
2133 in trace-stop state, and we'd be stuck forever waiting for
2134 their status to change (which would only happen if we resumed
2135 them). Even if WNOHANG is set, this return code is preferred
2136 over 0 (below), as it is more detailed. */
2137 if ((find_inferior (&all_threads,
2138 not_stopped_callback,
2139 &wait_ptid) == NULL))
2142 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2143 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2147 /* No interesting event to report to the caller. */
2148 if ((options & WNOHANG))
2151 debug_printf ("WNOHANG set, no event found\n");
2153 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2157 /* Block until we get an event reported with SIGCHLD. */
2159 debug_printf ("sigsuspend'ing\n");
2161 sigsuspend (&prev_mask);
2162 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2166 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2168 current_thread = event_thread;
2170 /* Check for thread exit. */
2171 if (! WIFSTOPPED (*wstatp))
2173 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2176 debug_printf ("LWP %d is the last lwp of process. "
2177 "Process %ld exiting.\n",
2178 pid_of (event_thread), lwpid_of (event_thread));
2179 return lwpid_of (event_thread);
2182 return lwpid_of (event_thread);
2185 /* Wait for an event from child(ren) PTID. PTIDs can be:
2186 minus_one_ptid, to specify any child; a pid PTID, specifying all
2187 lwps of a thread group; or a PTID representing a single lwp. Store
2188 the stop status through the status pointer WSTAT. OPTIONS is
2189 passed to the waitpid call. Return 0 if no event was found and
2190 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2191 was found. Return the PID of the stopped child otherwise. */
2194 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2196 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2199 /* Count the LWP's that have had events. */
2202 count_events_callback (struct inferior_list_entry *entry, void *data)
2204 struct thread_info *thread = (struct thread_info *) entry;
2205 struct lwp_info *lp = get_thread_lwp (thread);
2208 gdb_assert (count != NULL);
2210 /* Count only resumed LWPs that have a SIGTRAP event pending that
2211 should be reported to GDB. */
2212 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2213 && thread->last_resume_kind != resume_stop
2214 && lp_status_maybe_breakpoint (lp)
2215 && !breakpoint_inserted_here (lp->stop_pc))
2221 /* Select the LWP (if any) that is currently being single-stepped. */
2224 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2226 struct thread_info *thread = (struct thread_info *) entry;
2227 struct lwp_info *lp = get_thread_lwp (thread);
2229 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2230 && thread->last_resume_kind == resume_step
2231 && lp->status_pending_p)
2237 /* Select the Nth LWP that has had a SIGTRAP event that should be
2241 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2243 struct thread_info *thread = (struct thread_info *) entry;
2244 struct lwp_info *lp = get_thread_lwp (thread);
2245 int *selector = data;
2247 gdb_assert (selector != NULL);
2249 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2250 if (thread->last_resume_kind != resume_stop
2251 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2252 && lp_status_maybe_breakpoint (lp)
2253 && !breakpoint_inserted_here (lp->stop_pc))
2254 if ((*selector)-- == 0)
2261 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2263 struct thread_info *thread = (struct thread_info *) entry;
2264 struct lwp_info *lp = get_thread_lwp (thread);
2265 struct lwp_info *event_lp = data;
2267 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2271 /* If a LWP other than the LWP that we're reporting an event for has
2272 hit a GDB breakpoint (as opposed to some random trap signal),
2273 then just arrange for it to hit it again later. We don't keep
2274 the SIGTRAP status and don't forward the SIGTRAP signal to the
2275 LWP. We will handle the current event, eventually we will resume
2276 all LWPs, and this one will get its breakpoint trap again.
2278 If we do not do this, then we run the risk that the user will
2279 delete or disable the breakpoint, but the LWP will have already
2282 if (thread->last_resume_kind != resume_stop
2283 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2284 && lp_status_maybe_breakpoint (lp)
2286 && !lp->stopped_by_watchpoint
2287 && cancel_breakpoint (lp))
2288 /* Throw away the SIGTRAP. */
2289 lp->status_pending_p = 0;
2295 linux_cancel_breakpoints (void)
2297 find_inferior (&all_threads, cancel_breakpoints_callback, NULL);
2300 /* Select one LWP out of those that have events pending. */
2303 select_event_lwp (struct lwp_info **orig_lp)
2306 int random_selector;
2307 struct thread_info *event_thread;
2309 /* Give preference to any LWP that is being single-stepped. */
2311 = (struct thread_info *) find_inferior (&all_threads,
2312 select_singlestep_lwp_callback,
2314 if (event_thread != NULL)
2317 debug_printf ("SEL: Select single-step %s\n",
2318 target_pid_to_str (ptid_of (event_thread)));
2322 /* No single-stepping LWP. Select one at random, out of those
2323 which have had SIGTRAP events. */
2325 /* First see how many SIGTRAP events we have. */
2326 find_inferior (&all_threads, count_events_callback, &num_events);
2328 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2329 random_selector = (int)
2330 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2332 if (debug_threads && num_events > 1)
2333 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2334 num_events, random_selector);
2337 = (struct thread_info *) find_inferior (&all_threads,
2338 select_event_lwp_callback,
2342 if (event_thread != NULL)
2344 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2346 /* Switch the event LWP. */
2347 *orig_lp = event_lp;
2351 /* Decrement the suspend count of an LWP. */
2354 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2356 struct thread_info *thread = (struct thread_info *) entry;
2357 struct lwp_info *lwp = get_thread_lwp (thread);
2359 /* Ignore EXCEPT. */
2365 gdb_assert (lwp->suspended >= 0);
2369 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2373 unsuspend_all_lwps (struct lwp_info *except)
2375 find_inferior (&all_threads, unsuspend_one_lwp, except);
2378 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2379 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2381 static int lwp_running (struct inferior_list_entry *entry, void *data);
2382 static ptid_t linux_wait_1 (ptid_t ptid,
2383 struct target_waitstatus *ourstatus,
2384 int target_options);
2386 /* Stabilize threads (move out of jump pads).
2388 If a thread is midway collecting a fast tracepoint, we need to
2389 finish the collection and move it out of the jump pad before
2390 reporting the signal.
2392 This avoids recursion while collecting (when a signal arrives
2393 midway, and the signal handler itself collects), which would trash
2394 the trace buffer. In case the user set a breakpoint in a signal
2395 handler, this avoids the backtrace showing the jump pad, etc..
2396 Most importantly, there are certain things we can't do safely if
2397 threads are stopped in a jump pad (or in its callee's). For
2400 - starting a new trace run. A thread still collecting the
2401 previous run, could trash the trace buffer when resumed. The trace
2402 buffer control structures would have been reset but the thread had
2403 no way to tell. The thread could even midway memcpy'ing to the
2404 buffer, which would mean that when resumed, it would clobber the
2405 trace buffer that had been set for a new run.
2407 - we can't rewrite/reuse the jump pads for new tracepoints
2408 safely. Say you do tstart while a thread is stopped midway while
2409 collecting. When the thread is later resumed, it finishes the
2410 collection, and returns to the jump pad, to execute the original
2411 instruction that was under the tracepoint jump at the time the
2412 older run had been started. If the jump pad had been rewritten
2413 since for something else in the new run, the thread would now
2414 execute the wrong / random instructions. */
2417 linux_stabilize_threads (void)
2419 struct thread_info *saved_thread;
2420 struct thread_info *thread_stuck;
2423 = (struct thread_info *) find_inferior (&all_threads,
2424 stuck_in_jump_pad_callback,
2426 if (thread_stuck != NULL)
2429 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2430 lwpid_of (thread_stuck));
2434 saved_thread = current_thread;
2436 stabilizing_threads = 1;
2439 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2441 /* Loop until all are stopped out of the jump pads. */
2442 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2444 struct target_waitstatus ourstatus;
2445 struct lwp_info *lwp;
2448 /* Note that we go through the full wait even loop. While
2449 moving threads out of jump pad, we need to be able to step
2450 over internal breakpoints and such. */
2451 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2453 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2455 lwp = get_thread_lwp (current_thread);
2460 if (ourstatus.value.sig != GDB_SIGNAL_0
2461 || current_thread->last_resume_kind == resume_stop)
2463 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2464 enqueue_one_deferred_signal (lwp, &wstat);
2469 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2471 stabilizing_threads = 0;
2473 current_thread = saved_thread;
2478 = (struct thread_info *) find_inferior (&all_threads,
2479 stuck_in_jump_pad_callback,
2481 if (thread_stuck != NULL)
2482 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2483 lwpid_of (thread_stuck));
2487 /* Wait for process, returns status. */
2490 linux_wait_1 (ptid_t ptid,
2491 struct target_waitstatus *ourstatus, int target_options)
2494 struct lwp_info *event_child;
2497 int step_over_finished;
2498 int bp_explains_trap;
2499 int maybe_internal_trap;
2507 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2510 /* Translate generic target options into linux options. */
2512 if (target_options & TARGET_WNOHANG)
2516 bp_explains_trap = 0;
2519 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2521 /* If we were only supposed to resume one thread, only wait for
2522 that thread - if it's still alive. If it died, however - which
2523 can happen if we're coming from the thread death case below -
2524 then we need to make sure we restart the other threads. We could
2525 pick a thread at random or restart all; restarting all is less
2528 && !ptid_equal (cont_thread, null_ptid)
2529 && !ptid_equal (cont_thread, minus_one_ptid))
2531 struct thread_info *thread;
2533 thread = (struct thread_info *) find_inferior_id (&all_threads,
2536 /* No stepping, no signal - unless one is pending already, of course. */
2539 struct thread_resume resume_info;
2540 resume_info.thread = minus_one_ptid;
2541 resume_info.kind = resume_continue;
2542 resume_info.sig = 0;
2543 linux_resume (&resume_info, 1);
2549 if (ptid_equal (step_over_bkpt, null_ptid))
2550 pid = linux_wait_for_event (ptid, &w, options);
2554 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2555 target_pid_to_str (step_over_bkpt));
2556 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2561 gdb_assert (target_options & TARGET_WNOHANG);
2565 debug_printf ("linux_wait_1 ret = null_ptid, "
2566 "TARGET_WAITKIND_IGNORE\n");
2570 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2577 debug_printf ("linux_wait_1 ret = null_ptid, "
2578 "TARGET_WAITKIND_NO_RESUMED\n");
2582 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2586 event_child = get_thread_lwp (current_thread);
2588 /* linux_wait_for_event only returns an exit status for the last
2589 child of a process. Report it. */
2590 if (WIFEXITED (w) || WIFSIGNALED (w))
2594 ourstatus->kind = TARGET_WAITKIND_EXITED;
2595 ourstatus->value.integer = WEXITSTATUS (w);
2599 debug_printf ("linux_wait_1 ret = %s, exited with "
2601 target_pid_to_str (ptid_of (current_thread)),
2608 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2609 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2613 debug_printf ("linux_wait_1 ret = %s, terminated with "
2615 target_pid_to_str (ptid_of (current_thread)),
2621 return ptid_of (current_thread);
2624 /* If this event was not handled before, and is not a SIGTRAP, we
2625 report it. SIGILL and SIGSEGV are also treated as traps in case
2626 a breakpoint is inserted at the current PC. If this target does
2627 not support internal breakpoints at all, we also report the
2628 SIGTRAP without further processing; it's of no concern to us. */
2630 = (supports_breakpoints ()
2631 && (WSTOPSIG (w) == SIGTRAP
2632 || ((WSTOPSIG (w) == SIGILL
2633 || WSTOPSIG (w) == SIGSEGV)
2634 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2636 if (maybe_internal_trap)
2638 /* Handle anything that requires bookkeeping before deciding to
2639 report the event or continue waiting. */
2641 /* First check if we can explain the SIGTRAP with an internal
2642 breakpoint, or if we should possibly report the event to GDB.
2643 Do this before anything that may remove or insert a
2645 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2647 /* We have a SIGTRAP, possibly a step-over dance has just
2648 finished. If so, tweak the state machine accordingly,
2649 reinsert breakpoints and delete any reinsert (software
2650 single-step) breakpoints. */
2651 step_over_finished = finish_step_over (event_child);
2653 /* Now invoke the callbacks of any internal breakpoints there. */
2654 check_breakpoints (event_child->stop_pc);
2656 /* Handle tracepoint data collecting. This may overflow the
2657 trace buffer, and cause a tracing stop, removing
2659 trace_event = handle_tracepoints (event_child);
2661 if (bp_explains_trap)
2663 /* If we stepped or ran into an internal breakpoint, we've
2664 already handled it. So next time we resume (from this
2665 PC), we should step over it. */
2667 debug_printf ("Hit a gdbserver breakpoint.\n");
2669 if (breakpoint_here (event_child->stop_pc))
2670 event_child->need_step_over = 1;
2675 /* We have some other signal, possibly a step-over dance was in
2676 progress, and it should be cancelled too. */
2677 step_over_finished = finish_step_over (event_child);
2680 /* We have all the data we need. Either report the event to GDB, or
2681 resume threads and keep waiting for more. */
2683 /* If we're collecting a fast tracepoint, finish the collection and
2684 move out of the jump pad before delivering a signal. See
2685 linux_stabilize_threads. */
2688 && WSTOPSIG (w) != SIGTRAP
2689 && supports_fast_tracepoints ()
2690 && agent_loaded_p ())
2693 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2694 "to defer or adjust it.\n",
2695 WSTOPSIG (w), lwpid_of (current_thread));
2697 /* Allow debugging the jump pad itself. */
2698 if (current_thread->last_resume_kind != resume_step
2699 && maybe_move_out_of_jump_pad (event_child, &w))
2701 enqueue_one_deferred_signal (event_child, &w);
2704 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2705 WSTOPSIG (w), lwpid_of (current_thread));
2707 linux_resume_one_lwp (event_child, 0, 0, NULL);
2712 if (event_child->collecting_fast_tracepoint)
2715 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2716 "Check if we're already there.\n",
2717 lwpid_of (current_thread),
2718 event_child->collecting_fast_tracepoint);
2722 event_child->collecting_fast_tracepoint
2723 = linux_fast_tracepoint_collecting (event_child, NULL);
2725 if (event_child->collecting_fast_tracepoint != 1)
2727 /* No longer need this breakpoint. */
2728 if (event_child->exit_jump_pad_bkpt != NULL)
2731 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2732 "stopping all threads momentarily.\n");
2734 /* Other running threads could hit this breakpoint.
2735 We don't handle moribund locations like GDB does,
2736 instead we always pause all threads when removing
2737 breakpoints, so that any step-over or
2738 decr_pc_after_break adjustment is always taken
2739 care of while the breakpoint is still
2741 stop_all_lwps (1, event_child);
2742 cancel_breakpoints ();
2744 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2745 event_child->exit_jump_pad_bkpt = NULL;
2747 unstop_all_lwps (1, event_child);
2749 gdb_assert (event_child->suspended >= 0);
2753 if (event_child->collecting_fast_tracepoint == 0)
2756 debug_printf ("fast tracepoint finished "
2757 "collecting successfully.\n");
2759 /* We may have a deferred signal to report. */
2760 if (dequeue_one_deferred_signal (event_child, &w))
2763 debug_printf ("dequeued one signal.\n");
2768 debug_printf ("no deferred signals.\n");
2770 if (stabilizing_threads)
2772 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2773 ourstatus->value.sig = GDB_SIGNAL_0;
2777 debug_printf ("linux_wait_1 ret = %s, stopped "
2778 "while stabilizing threads\n",
2779 target_pid_to_str (ptid_of (current_thread)));
2783 return ptid_of (current_thread);
2789 /* Check whether GDB would be interested in this event. */
2791 /* If GDB is not interested in this signal, don't stop other
2792 threads, and don't report it to GDB. Just resume the inferior
2793 right away. We do this for threading-related signals as well as
2794 any that GDB specifically requested we ignore. But never ignore
2795 SIGSTOP if we sent it ourselves, and do not ignore signals when
2796 stepping - they may require special handling to skip the signal
2798 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2801 && current_thread->last_resume_kind != resume_step
2803 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2804 (current_process ()->private->thread_db != NULL
2805 && (WSTOPSIG (w) == __SIGRTMIN
2806 || WSTOPSIG (w) == __SIGRTMIN + 1))
2809 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2810 && !(WSTOPSIG (w) == SIGSTOP
2811 && current_thread->last_resume_kind == resume_stop))))
2813 siginfo_t info, *info_p;
2816 debug_printf ("Ignored signal %d for LWP %ld.\n",
2817 WSTOPSIG (w), lwpid_of (current_thread));
2819 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2820 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2824 linux_resume_one_lwp (event_child, event_child->stepping,
2825 WSTOPSIG (w), info_p);
2829 /* Note that all addresses are always "out of the step range" when
2830 there's no range to begin with. */
2831 in_step_range = lwp_in_step_range (event_child);
2833 /* If GDB wanted this thread to single step, and the thread is out
2834 of the step range, we always want to report the SIGTRAP, and let
2835 GDB handle it. Watchpoints should always be reported. So should
2836 signals we can't explain. A SIGTRAP we can't explain could be a
2837 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2838 do, we're be able to handle GDB breakpoints on top of internal
2839 breakpoints, by handling the internal breakpoint and still
2840 reporting the event to GDB. If we don't, we're out of luck, GDB
2841 won't see the breakpoint hit. */
2842 report_to_gdb = (!maybe_internal_trap
2843 || (current_thread->last_resume_kind == resume_step
2845 || event_child->stopped_by_watchpoint
2846 || (!step_over_finished && !in_step_range
2847 && !bp_explains_trap && !trace_event)
2848 || (gdb_breakpoint_here (event_child->stop_pc)
2849 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2850 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2852 run_breakpoint_commands (event_child->stop_pc);
2854 /* We found no reason GDB would want us to stop. We either hit one
2855 of our own breakpoints, or finished an internal step GDB
2856 shouldn't know about. */
2861 if (bp_explains_trap)
2862 debug_printf ("Hit a gdbserver breakpoint.\n");
2863 if (step_over_finished)
2864 debug_printf ("Step-over finished.\n");
2866 debug_printf ("Tracepoint event.\n");
2867 if (lwp_in_step_range (event_child))
2868 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2869 paddress (event_child->stop_pc),
2870 paddress (event_child->step_range_start),
2871 paddress (event_child->step_range_end));
2874 /* We're not reporting this breakpoint to GDB, so apply the
2875 decr_pc_after_break adjustment to the inferior's regcache
2878 if (the_low_target.set_pc != NULL)
2880 struct regcache *regcache
2881 = get_thread_regcache (current_thread, 1);
2882 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2885 /* We may have finished stepping over a breakpoint. If so,
2886 we've stopped and suspended all LWPs momentarily except the
2887 stepping one. This is where we resume them all again. We're
2888 going to keep waiting, so use proceed, which handles stepping
2889 over the next breakpoint. */
2891 debug_printf ("proceeding all threads.\n");
2893 if (step_over_finished)
2894 unsuspend_all_lwps (event_child);
2896 proceed_all_lwps ();
2902 if (current_thread->last_resume_kind == resume_step)
2904 if (event_child->step_range_start == event_child->step_range_end)
2905 debug_printf ("GDB wanted to single-step, reporting event.\n");
2906 else if (!lwp_in_step_range (event_child))
2907 debug_printf ("Out of step range, reporting event.\n");
2909 if (event_child->stopped_by_watchpoint)
2910 debug_printf ("Stopped by watchpoint.\n");
2911 if (gdb_breakpoint_here (event_child->stop_pc))
2912 debug_printf ("Stopped by GDB breakpoint.\n");
2914 debug_printf ("Hit a non-gdbserver trap event.\n");
2917 /* Alright, we're going to report a stop. */
2919 if (!non_stop && !stabilizing_threads)
2921 /* In all-stop, stop all threads. */
2922 stop_all_lwps (0, NULL);
2924 /* If we're not waiting for a specific LWP, choose an event LWP
2925 from among those that have had events. Giving equal priority
2926 to all LWPs that have had events helps prevent
2928 if (ptid_equal (ptid, minus_one_ptid))
2930 event_child->status_pending_p = 1;
2931 event_child->status_pending = w;
2933 select_event_lwp (&event_child);
2935 /* current_thread and event_child must stay in sync. */
2936 current_thread = get_lwp_thread (event_child);
2938 event_child->status_pending_p = 0;
2939 w = event_child->status_pending;
2942 /* Now that we've selected our final event LWP, cancel any
2943 breakpoints in other LWPs that have hit a GDB breakpoint.
2944 See the comment in cancel_breakpoints_callback to find out
2946 find_inferior (&all_threads, cancel_breakpoints_callback, event_child);
2948 /* If we were going a step-over, all other threads but the stepping one
2949 had been paused in start_step_over, with their suspend counts
2950 incremented. We don't want to do a full unstop/unpause, because we're
2951 in all-stop mode (so we want threads stopped), but we still need to
2952 unsuspend the other threads, to decrement their `suspended' count
2954 if (step_over_finished)
2955 unsuspend_all_lwps (event_child);
2957 /* Stabilize threads (move out of jump pads). */
2958 stabilize_threads ();
2962 /* If we just finished a step-over, then all threads had been
2963 momentarily paused. In all-stop, that's fine, we want
2964 threads stopped by now anyway. In non-stop, we need to
2965 re-resume threads that GDB wanted to be running. */
2966 if (step_over_finished)
2967 unstop_all_lwps (1, event_child);
2970 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2972 if (current_thread->last_resume_kind == resume_stop
2973 && WSTOPSIG (w) == SIGSTOP)
2975 /* A thread that has been requested to stop by GDB with vCont;t,
2976 and it stopped cleanly, so report as SIG0. The use of
2977 SIGSTOP is an implementation detail. */
2978 ourstatus->value.sig = GDB_SIGNAL_0;
2980 else if (current_thread->last_resume_kind == resume_stop
2981 && WSTOPSIG (w) != SIGSTOP)
2983 /* A thread that has been requested to stop by GDB with vCont;t,
2984 but, it stopped for other reasons. */
2985 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2989 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2992 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2996 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2997 target_pid_to_str (ptid_of (current_thread)),
2998 ourstatus->kind, ourstatus->value.sig);
3002 return ptid_of (current_thread);
3005 /* Get rid of any pending event in the pipe. */
3007 async_file_flush (void)
3013 ret = read (linux_event_pipe[0], &buf, 1);
3014 while (ret >= 0 || (ret == -1 && errno == EINTR));
3017 /* Put something in the pipe, so the event loop wakes up. */
3019 async_file_mark (void)
3023 async_file_flush ();
3026 ret = write (linux_event_pipe[1], "+", 1);
3027 while (ret == 0 || (ret == -1 && errno == EINTR));
3029 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3030 be awakened anyway. */
3034 linux_wait (ptid_t ptid,
3035 struct target_waitstatus *ourstatus, int target_options)
3039 /* Flush the async file first. */
3040 if (target_is_async_p ())
3041 async_file_flush ();
3043 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3045 /* If at least one stop was reported, there may be more. A single
3046 SIGCHLD can signal more than one child stop. */
3047 if (target_is_async_p ()
3048 && (target_options & TARGET_WNOHANG) != 0
3049 && !ptid_equal (event_ptid, null_ptid))
3055 /* Send a signal to an LWP. */
3058 kill_lwp (unsigned long lwpid, int signo)
3060 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3061 fails, then we are not using nptl threads and we should be using kill. */
3065 static int tkill_failed;
3072 ret = syscall (__NR_tkill, lwpid, signo);
3073 if (errno != ENOSYS)
3080 return kill (lwpid, signo);
3084 linux_stop_lwp (struct lwp_info *lwp)
3090 send_sigstop (struct lwp_info *lwp)
3094 pid = lwpid_of (get_lwp_thread (lwp));
3096 /* If we already have a pending stop signal for this process, don't
3098 if (lwp->stop_expected)
3101 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3107 debug_printf ("Sending sigstop to lwp %d\n", pid);
3109 lwp->stop_expected = 1;
3110 kill_lwp (pid, SIGSTOP);
3114 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3116 struct thread_info *thread = (struct thread_info *) entry;
3117 struct lwp_info *lwp = get_thread_lwp (thread);
3119 /* Ignore EXCEPT. */
3130 /* Increment the suspend count of an LWP, and stop it, if not stopped
3133 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3136 struct thread_info *thread = (struct thread_info *) entry;
3137 struct lwp_info *lwp = get_thread_lwp (thread);
3139 /* Ignore EXCEPT. */
3145 return send_sigstop_callback (entry, except);
3149 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3151 /* It's dead, really. */
3154 /* Store the exit status for later. */
3155 lwp->status_pending_p = 1;
3156 lwp->status_pending = wstat;
3158 /* Prevent trying to stop it. */
3161 /* No further stops are expected from a dead lwp. */
3162 lwp->stop_expected = 0;
3165 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3168 wait_for_sigstop (void)
3170 struct thread_info *saved_thread;
3175 saved_thread = current_thread;
3176 if (saved_thread != NULL)
3177 saved_tid = saved_thread->entry.id;
3179 saved_tid = null_ptid; /* avoid bogus unused warning */
3182 debug_printf ("wait_for_sigstop: pulling events\n");
3184 /* Passing NULL_PTID as filter indicates we want all events to be
3185 left pending. Eventually this returns when there are no
3186 unwaited-for children left. */
3187 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3189 gdb_assert (ret == -1);
3191 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3192 current_thread = saved_thread;
3196 debug_printf ("Previously current thread died.\n");
3200 /* We can't change the current inferior behind GDB's back,
3201 otherwise, a subsequent command may apply to the wrong
3203 current_thread = NULL;
3207 /* Set a valid thread as current. */
3208 set_desired_thread (0);
3213 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3214 move it out, because we need to report the stop event to GDB. For
3215 example, if the user puts a breakpoint in the jump pad, it's
3216 because she wants to debug it. */
3219 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3221 struct thread_info *thread = (struct thread_info *) entry;
3222 struct lwp_info *lwp = get_thread_lwp (thread);
3224 gdb_assert (lwp->suspended == 0);
3225 gdb_assert (lwp->stopped);
3227 /* Allow debugging the jump pad, gdb_collect, etc.. */
3228 return (supports_fast_tracepoints ()
3229 && agent_loaded_p ()
3230 && (gdb_breakpoint_here (lwp->stop_pc)
3231 || lwp->stopped_by_watchpoint
3232 || thread->last_resume_kind == resume_step)
3233 && linux_fast_tracepoint_collecting (lwp, NULL));
3237 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3239 struct thread_info *thread = (struct thread_info *) entry;
3240 struct lwp_info *lwp = get_thread_lwp (thread);
3243 gdb_assert (lwp->suspended == 0);
3244 gdb_assert (lwp->stopped);
3246 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3248 /* Allow debugging the jump pad, gdb_collect, etc. */
3249 if (!gdb_breakpoint_here (lwp->stop_pc)
3250 && !lwp->stopped_by_watchpoint
3251 && thread->last_resume_kind != resume_step
3252 && maybe_move_out_of_jump_pad (lwp, wstat))
3255 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3260 lwp->status_pending_p = 0;
3261 enqueue_one_deferred_signal (lwp, wstat);
3264 debug_printf ("Signal %d for LWP %ld deferred "
3266 WSTOPSIG (*wstat), lwpid_of (thread));
3269 linux_resume_one_lwp (lwp, 0, 0, NULL);
3276 lwp_running (struct inferior_list_entry *entry, void *data)
3278 struct thread_info *thread = (struct thread_info *) entry;
3279 struct lwp_info *lwp = get_thread_lwp (thread);
3288 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3289 If SUSPEND, then also increase the suspend count of every LWP,
3293 stop_all_lwps (int suspend, struct lwp_info *except)
3295 /* Should not be called recursively. */
3296 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3301 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3302 suspend ? "stop-and-suspend" : "stop",
3304 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3308 stopping_threads = (suspend
3309 ? STOPPING_AND_SUSPENDING_THREADS
3310 : STOPPING_THREADS);
3313 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3315 find_inferior (&all_threads, send_sigstop_callback, except);
3316 wait_for_sigstop ();
3317 stopping_threads = NOT_STOPPING_THREADS;
3321 debug_printf ("stop_all_lwps done, setting stopping_threads "
3322 "back to !stopping\n");
3327 /* Resume execution of the inferior process.
3328 If STEP is nonzero, single-step it.
3329 If SIGNAL is nonzero, give it that signal. */
3332 linux_resume_one_lwp (struct lwp_info *lwp,
3333 int step, int signal, siginfo_t *info)
3335 struct thread_info *thread = get_lwp_thread (lwp);
3336 struct thread_info *saved_thread;
3337 int fast_tp_collecting;
3339 if (lwp->stopped == 0)
3342 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3344 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3346 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3347 user used the "jump" command, or "set $pc = foo"). */
3348 if (lwp->stop_pc != get_pc (lwp))
3350 /* Collecting 'while-stepping' actions doesn't make sense
3352 release_while_stepping_state_list (thread);
3355 /* If we have pending signals or status, and a new signal, enqueue the
3356 signal. Also enqueue the signal if we are waiting to reinsert a
3357 breakpoint; it will be picked up again below. */
3359 && (lwp->status_pending_p
3360 || lwp->pending_signals != NULL
3361 || lwp->bp_reinsert != 0
3362 || fast_tp_collecting))
3364 struct pending_signals *p_sig;
3365 p_sig = xmalloc (sizeof (*p_sig));
3366 p_sig->prev = lwp->pending_signals;
3367 p_sig->signal = signal;
3369 memset (&p_sig->info, 0, sizeof (siginfo_t));
3371 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3372 lwp->pending_signals = p_sig;
3375 if (lwp->status_pending_p)
3378 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3379 " has pending status\n",
3380 lwpid_of (thread), step ? "step" : "continue", signal,
3381 lwp->stop_expected ? "expected" : "not expected");
3385 saved_thread = current_thread;
3386 current_thread = thread;
3389 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3390 lwpid_of (thread), step ? "step" : "continue", signal,
3391 lwp->stop_expected ? "expected" : "not expected");
3393 /* This bit needs some thinking about. If we get a signal that
3394 we must report while a single-step reinsert is still pending,
3395 we often end up resuming the thread. It might be better to
3396 (ew) allow a stack of pending events; then we could be sure that
3397 the reinsert happened right away and not lose any signals.
3399 Making this stack would also shrink the window in which breakpoints are
3400 uninserted (see comment in linux_wait_for_lwp) but not enough for
3401 complete correctness, so it won't solve that problem. It may be
3402 worthwhile just to solve this one, however. */
3403 if (lwp->bp_reinsert != 0)
3406 debug_printf (" pending reinsert at 0x%s\n",
3407 paddress (lwp->bp_reinsert));
3409 if (can_hardware_single_step ())
3411 if (fast_tp_collecting == 0)
3414 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3416 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3423 /* Postpone any pending signal. It was enqueued above. */
3427 if (fast_tp_collecting == 1)
3430 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3431 " (exit-jump-pad-bkpt)\n",
3434 /* Postpone any pending signal. It was enqueued above. */
3437 else if (fast_tp_collecting == 2)
3440 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3441 " single-stepping\n",
3444 if (can_hardware_single_step ())
3448 internal_error (__FILE__, __LINE__,
3449 "moving out of jump pad single-stepping"
3450 " not implemented on this target");
3453 /* Postpone any pending signal. It was enqueued above. */
3457 /* If we have while-stepping actions in this thread set it stepping.
3458 If we have a signal to deliver, it may or may not be set to
3459 SIG_IGN, we don't know. Assume so, and allow collecting
3460 while-stepping into a signal handler. A possible smart thing to
3461 do would be to set an internal breakpoint at the signal return
3462 address, continue, and carry on catching this while-stepping
3463 action only when that breakpoint is hit. A future
3465 if (thread->while_stepping != NULL
3466 && can_hardware_single_step ())
3469 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3474 if (debug_threads && the_low_target.get_pc != NULL)
3476 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3477 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3478 debug_printf (" resuming from pc 0x%lx\n", (long) pc);
3481 /* If we have pending signals, consume one unless we are trying to
3482 reinsert a breakpoint or we're trying to finish a fast tracepoint
3484 if (lwp->pending_signals != NULL
3485 && lwp->bp_reinsert == 0
3486 && fast_tp_collecting == 0)
3488 struct pending_signals **p_sig;
3490 p_sig = &lwp->pending_signals;
3491 while ((*p_sig)->prev != NULL)
3492 p_sig = &(*p_sig)->prev;
3494 signal = (*p_sig)->signal;
3495 if ((*p_sig)->info.si_signo != 0)
3496 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3503 if (the_low_target.prepare_to_resume != NULL)
3504 the_low_target.prepare_to_resume (lwp);
3506 regcache_invalidate_thread (thread);
3509 lwp->stopped_by_watchpoint = 0;
3510 lwp->stepping = step;
3511 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3512 (PTRACE_TYPE_ARG3) 0,
3513 /* Coerce to a uintptr_t first to avoid potential gcc warning
3514 of coercing an 8 byte integer to a 4 byte pointer. */
3515 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3517 current_thread = saved_thread;
3520 /* ESRCH from ptrace either means that the thread was already
3521 running (an error) or that it is gone (a race condition). If
3522 it's gone, we will get a notification the next time we wait,
3523 so we can ignore the error. We could differentiate these
3524 two, but it's tricky without waiting; the thread still exists
3525 as a zombie, so sending it signal 0 would succeed. So just
3530 perror_with_name ("ptrace");
3534 struct thread_resume_array
3536 struct thread_resume *resume;
3540 /* This function is called once per thread via find_inferior.
3541 ARG is a pointer to a thread_resume_array struct.
3542 We look up the thread specified by ENTRY in ARG, and mark the thread
3543 with a pointer to the appropriate resume request.
3545 This algorithm is O(threads * resume elements), but resume elements
3546 is small (and will remain small at least until GDB supports thread
3550 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3552 struct thread_info *thread = (struct thread_info *) entry;
3553 struct lwp_info *lwp = get_thread_lwp (thread);
3555 struct thread_resume_array *r;
3559 for (ndx = 0; ndx < r->n; ndx++)
3561 ptid_t ptid = r->resume[ndx].thread;
3562 if (ptid_equal (ptid, minus_one_ptid)
3563 || ptid_equal (ptid, entry->id)
3564 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3566 || (ptid_get_pid (ptid) == pid_of (thread)
3567 && (ptid_is_pid (ptid)
3568 || ptid_get_lwp (ptid) == -1)))
3570 if (r->resume[ndx].kind == resume_stop
3571 && thread->last_resume_kind == resume_stop)
3574 debug_printf ("already %s LWP %ld at GDB's request\n",
3575 (thread->last_status.kind
3576 == TARGET_WAITKIND_STOPPED)
3584 lwp->resume = &r->resume[ndx];
3585 thread->last_resume_kind = lwp->resume->kind;
3587 lwp->step_range_start = lwp->resume->step_range_start;
3588 lwp->step_range_end = lwp->resume->step_range_end;
3590 /* If we had a deferred signal to report, dequeue one now.
3591 This can happen if LWP gets more than one signal while
3592 trying to get out of a jump pad. */
3594 && !lwp->status_pending_p
3595 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3597 lwp->status_pending_p = 1;
3600 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3601 "leaving status pending.\n",
3602 WSTOPSIG (lwp->status_pending),
3610 /* No resume action for this thread. */
3616 /* find_inferior callback for linux_resume.
3617 Set *FLAG_P if this lwp has an interesting status pending. */
3620 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3622 struct thread_info *thread = (struct thread_info *) entry;
3623 struct lwp_info *lwp = get_thread_lwp (thread);
3625 /* LWPs which will not be resumed are not interesting, because
3626 we might not wait for them next time through linux_wait. */
3627 if (lwp->resume == NULL)
3630 if (lwp->status_pending_p)
3631 * (int *) flag_p = 1;
3636 /* Return 1 if this lwp that GDB wants running is stopped at an
3637 internal breakpoint that we need to step over. It assumes that any
3638 required STOP_PC adjustment has already been propagated to the
3639 inferior's regcache. */
3642 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3644 struct thread_info *thread = (struct thread_info *) entry;
3645 struct lwp_info *lwp = get_thread_lwp (thread);
3646 struct thread_info *saved_thread;
3649 /* LWPs which will not be resumed are not interesting, because we
3650 might not wait for them next time through linux_wait. */
3655 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3660 if (thread->last_resume_kind == resume_stop)
3663 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3669 gdb_assert (lwp->suspended >= 0);
3674 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3679 if (!lwp->need_step_over)
3682 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3685 if (lwp->status_pending_p)
3688 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3694 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3698 /* If the PC has changed since we stopped, then don't do anything,
3699 and let the breakpoint/tracepoint be hit. This happens if, for
3700 instance, GDB handled the decr_pc_after_break subtraction itself,
3701 GDB is OOL stepping this thread, or the user has issued a "jump"
3702 command, or poked thread's registers herself. */
3703 if (pc != lwp->stop_pc)
3706 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3707 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3709 paddress (lwp->stop_pc), paddress (pc));
3711 lwp->need_step_over = 0;
3715 saved_thread = current_thread;
3716 current_thread = thread;
3718 /* We can only step over breakpoints we know about. */
3719 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3721 /* Don't step over a breakpoint that GDB expects to hit
3722 though. If the condition is being evaluated on the target's side
3723 and it evaluate to false, step over this breakpoint as well. */
3724 if (gdb_breakpoint_here (pc)
3725 && gdb_condition_true_at_breakpoint (pc)
3726 && gdb_no_commands_at_breakpoint (pc))
3729 debug_printf ("Need step over [LWP %ld]? yes, but found"
3730 " GDB breakpoint at 0x%s; skipping step over\n",
3731 lwpid_of (thread), paddress (pc));
3733 current_thread = saved_thread;
3739 debug_printf ("Need step over [LWP %ld]? yes, "
3740 "found breakpoint at 0x%s\n",
3741 lwpid_of (thread), paddress (pc));
3743 /* We've found an lwp that needs stepping over --- return 1 so
3744 that find_inferior stops looking. */
3745 current_thread = saved_thread;
3747 /* If the step over is cancelled, this is set again. */
3748 lwp->need_step_over = 0;
3753 current_thread = saved_thread;
3756 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3758 lwpid_of (thread), paddress (pc));
3763 /* Start a step-over operation on LWP. When LWP stopped at a
3764 breakpoint, to make progress, we need to remove the breakpoint out
3765 of the way. If we let other threads run while we do that, they may
3766 pass by the breakpoint location and miss hitting it. To avoid
3767 that, a step-over momentarily stops all threads while LWP is
3768 single-stepped while the breakpoint is temporarily uninserted from
3769 the inferior. When the single-step finishes, we reinsert the
3770 breakpoint, and let all threads that are supposed to be running,
3773 On targets that don't support hardware single-step, we don't
3774 currently support full software single-stepping. Instead, we only
3775 support stepping over the thread event breakpoint, by asking the
3776 low target where to place a reinsert breakpoint. Since this
3777 routine assumes the breakpoint being stepped over is a thread event
3778 breakpoint, it usually assumes the return address of the current
3779 function is a good enough place to set the reinsert breakpoint. */
3782 start_step_over (struct lwp_info *lwp)
3784 struct thread_info *thread = get_lwp_thread (lwp);
3785 struct thread_info *saved_thread;
3790 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3793 stop_all_lwps (1, lwp);
3794 gdb_assert (lwp->suspended == 0);
3797 debug_printf ("Done stopping all threads for step-over.\n");
3799 /* Note, we should always reach here with an already adjusted PC,
3800 either by GDB (if we're resuming due to GDB's request), or by our
3801 caller, if we just finished handling an internal breakpoint GDB
3802 shouldn't care about. */
3805 saved_thread = current_thread;
3806 current_thread = thread;
3808 lwp->bp_reinsert = pc;
3809 uninsert_breakpoints_at (pc);
3810 uninsert_fast_tracepoint_jumps_at (pc);
3812 if (can_hardware_single_step ())
3818 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3819 set_reinsert_breakpoint (raddr);
3823 current_thread = saved_thread;
3825 linux_resume_one_lwp (lwp, step, 0, NULL);
3827 /* Require next event from this LWP. */
3828 step_over_bkpt = thread->entry.id;
3832 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3833 start_step_over, if still there, and delete any reinsert
3834 breakpoints we've set, on non hardware single-step targets. */
3837 finish_step_over (struct lwp_info *lwp)
3839 if (lwp->bp_reinsert != 0)
3842 debug_printf ("Finished step over.\n");
3844 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3845 may be no breakpoint to reinsert there by now. */
3846 reinsert_breakpoints_at (lwp->bp_reinsert);
3847 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3849 lwp->bp_reinsert = 0;
3851 /* Delete any software-single-step reinsert breakpoints. No
3852 longer needed. We don't have to worry about other threads
3853 hitting this trap, and later not being able to explain it,
3854 because we were stepping over a breakpoint, and we hold all
3855 threads but LWP stopped while doing that. */
3856 if (!can_hardware_single_step ())
3857 delete_reinsert_breakpoints ();
3859 step_over_bkpt = null_ptid;
3866 /* This function is called once per thread. We check the thread's resume
3867 request, which will tell us whether to resume, step, or leave the thread
3868 stopped; and what signal, if any, it should be sent.
3870 For threads which we aren't explicitly told otherwise, we preserve
3871 the stepping flag; this is used for stepping over gdbserver-placed
3874 If pending_flags was set in any thread, we queue any needed
3875 signals, since we won't actually resume. We already have a pending
3876 event to report, so we don't need to preserve any step requests;
3877 they should be re-issued if necessary. */
3880 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3882 struct thread_info *thread = (struct thread_info *) entry;
3883 struct lwp_info *lwp = get_thread_lwp (thread);
3885 int leave_all_stopped = * (int *) arg;
3888 if (lwp->resume == NULL)
3891 if (lwp->resume->kind == resume_stop)
3894 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3899 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3901 /* Stop the thread, and wait for the event asynchronously,
3902 through the event loop. */
3908 debug_printf ("already stopped LWP %ld\n",
3911 /* The LWP may have been stopped in an internal event that
3912 was not meant to be notified back to GDB (e.g., gdbserver
3913 breakpoint), so we should be reporting a stop event in
3916 /* If the thread already has a pending SIGSTOP, this is a
3917 no-op. Otherwise, something later will presumably resume
3918 the thread and this will cause it to cancel any pending
3919 operation, due to last_resume_kind == resume_stop. If
3920 the thread already has a pending status to report, we
3921 will still report it the next time we wait - see
3922 status_pending_p_callback. */
3924 /* If we already have a pending signal to report, then
3925 there's no need to queue a SIGSTOP, as this means we're
3926 midway through moving the LWP out of the jumppad, and we
3927 will report the pending signal as soon as that is
3929 if (lwp->pending_signals_to_report == NULL)
3933 /* For stop requests, we're done. */
3935 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3939 /* If this thread which is about to be resumed has a pending status,
3940 then don't resume any threads - we can just report the pending
3941 status. Make sure to queue any signals that would otherwise be
3942 sent. In all-stop mode, we do this decision based on if *any*
3943 thread has a pending status. If there's a thread that needs the
3944 step-over-breakpoint dance, then don't resume any other thread
3945 but that particular one. */
3946 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3951 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3953 step = (lwp->resume->kind == resume_step);
3954 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3959 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3961 /* If we have a new signal, enqueue the signal. */
3962 if (lwp->resume->sig != 0)
3964 struct pending_signals *p_sig;
3965 p_sig = xmalloc (sizeof (*p_sig));
3966 p_sig->prev = lwp->pending_signals;
3967 p_sig->signal = lwp->resume->sig;
3968 memset (&p_sig->info, 0, sizeof (siginfo_t));
3970 /* If this is the same signal we were previously stopped by,
3971 make sure to queue its siginfo. We can ignore the return
3972 value of ptrace; if it fails, we'll skip
3973 PTRACE_SETSIGINFO. */
3974 if (WIFSTOPPED (lwp->last_status)
3975 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3976 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3979 lwp->pending_signals = p_sig;
3983 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3989 linux_resume (struct thread_resume *resume_info, size_t n)
3991 struct thread_resume_array array = { resume_info, n };
3992 struct thread_info *need_step_over = NULL;
3994 int leave_all_stopped;
3999 debug_printf ("linux_resume:\n");
4002 find_inferior (&all_threads, linux_set_resume_request, &array);
4004 /* If there is a thread which would otherwise be resumed, which has
4005 a pending status, then don't resume any threads - we can just
4006 report the pending status. Make sure to queue any signals that
4007 would otherwise be sent. In non-stop mode, we'll apply this
4008 logic to each thread individually. We consume all pending events
4009 before considering to start a step-over (in all-stop). */
4012 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4014 /* If there is a thread which would otherwise be resumed, which is
4015 stopped at a breakpoint that needs stepping over, then don't
4016 resume any threads - have it step over the breakpoint with all
4017 other threads stopped, then resume all threads again. Make sure
4018 to queue any signals that would otherwise be delivered or
4020 if (!any_pending && supports_breakpoints ())
4022 = (struct thread_info *) find_inferior (&all_threads,
4023 need_step_over_p, NULL);
4025 leave_all_stopped = (need_step_over != NULL || any_pending);
4029 if (need_step_over != NULL)
4030 debug_printf ("Not resuming all, need step over\n");
4031 else if (any_pending)
4032 debug_printf ("Not resuming, all-stop and found "
4033 "an LWP with pending status\n");
4035 debug_printf ("Resuming, no pending status or step over needed\n");
4038 /* Even if we're leaving threads stopped, queue all signals we'd
4039 otherwise deliver. */
4040 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4043 start_step_over (get_thread_lwp (need_step_over));
4047 debug_printf ("linux_resume done\n");
4052 /* This function is called once per thread. We check the thread's
4053 last resume request, which will tell us whether to resume, step, or
4054 leave the thread stopped. Any signal the client requested to be
4055 delivered has already been enqueued at this point.
4057 If any thread that GDB wants running is stopped at an internal
4058 breakpoint that needs stepping over, we start a step-over operation
4059 on that particular thread, and leave all others stopped. */
4062 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4064 struct thread_info *thread = (struct thread_info *) entry;
4065 struct lwp_info *lwp = get_thread_lwp (thread);
4072 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4077 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4081 if (thread->last_resume_kind == resume_stop
4082 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4085 debug_printf (" client wants LWP to remain %ld stopped\n",
4090 if (lwp->status_pending_p)
4093 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4098 gdb_assert (lwp->suspended >= 0);
4103 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4107 if (thread->last_resume_kind == resume_stop
4108 && lwp->pending_signals_to_report == NULL
4109 && lwp->collecting_fast_tracepoint == 0)
4111 /* We haven't reported this LWP as stopped yet (otherwise, the
4112 last_status.kind check above would catch it, and we wouldn't
4113 reach here. This LWP may have been momentarily paused by a
4114 stop_all_lwps call while handling for example, another LWP's
4115 step-over. In that case, the pending expected SIGSTOP signal
4116 that was queued at vCont;t handling time will have already
4117 been consumed by wait_for_sigstop, and so we need to requeue
4118 another one here. Note that if the LWP already has a SIGSTOP
4119 pending, this is a no-op. */
4122 debug_printf ("Client wants LWP %ld to stop. "
4123 "Making sure it has a SIGSTOP pending\n",
4129 step = thread->last_resume_kind == resume_step;
4130 linux_resume_one_lwp (lwp, step, 0, NULL);
4135 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4137 struct thread_info *thread = (struct thread_info *) entry;
4138 struct lwp_info *lwp = get_thread_lwp (thread);
4144 gdb_assert (lwp->suspended >= 0);
4146 return proceed_one_lwp (entry, except);
4149 /* When we finish a step-over, set threads running again. If there's
4150 another thread that may need a step-over, now's the time to start
4151 it. Eventually, we'll move all threads past their breakpoints. */
4154 proceed_all_lwps (void)
4156 struct thread_info *need_step_over;
4158 /* If there is a thread which would otherwise be resumed, which is
4159 stopped at a breakpoint that needs stepping over, then don't
4160 resume any threads - have it step over the breakpoint with all
4161 other threads stopped, then resume all threads again. */
4163 if (supports_breakpoints ())
4166 = (struct thread_info *) find_inferior (&all_threads,
4167 need_step_over_p, NULL);
4169 if (need_step_over != NULL)
4172 debug_printf ("proceed_all_lwps: found "
4173 "thread %ld needing a step-over\n",
4174 lwpid_of (need_step_over));
4176 start_step_over (get_thread_lwp (need_step_over));
4182 debug_printf ("Proceeding, no step-over needed\n");
4184 find_inferior (&all_threads, proceed_one_lwp, NULL);
4187 /* Stopped LWPs that the client wanted to be running, that don't have
4188 pending statuses, are set to run again, except for EXCEPT, if not
4189 NULL. This undoes a stop_all_lwps call. */
4192 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4198 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4199 lwpid_of (get_lwp_thread (except)));
4201 debug_printf ("unstopping all lwps\n");
4205 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4207 find_inferior (&all_threads, proceed_one_lwp, except);
4211 debug_printf ("unstop_all_lwps done\n");
4217 #ifdef HAVE_LINUX_REGSETS
4219 #define use_linux_regsets 1
4221 /* Returns true if REGSET has been disabled. */
4224 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4226 return (info->disabled_regsets != NULL
4227 && info->disabled_regsets[regset - info->regsets]);
4230 /* Disable REGSET. */
4233 disable_regset (struct regsets_info *info, struct regset_info *regset)
4237 dr_offset = regset - info->regsets;
4238 if (info->disabled_regsets == NULL)
4239 info->disabled_regsets = xcalloc (1, info->num_regsets);
4240 info->disabled_regsets[dr_offset] = 1;
4244 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4245 struct regcache *regcache)
4247 struct regset_info *regset;
4248 int saw_general_regs = 0;
4252 regset = regsets_info->regsets;
4254 pid = lwpid_of (current_thread);
4255 while (regset->size >= 0)
4260 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4266 buf = xmalloc (regset->size);
4268 nt_type = regset->nt_type;
4272 iov.iov_len = regset->size;
4273 data = (void *) &iov;
4279 res = ptrace (regset->get_request, pid,
4280 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4282 res = ptrace (regset->get_request, pid, data, nt_type);
4288 /* If we get EIO on a regset, do not try it again for
4289 this process mode. */
4290 disable_regset (regsets_info, regset);
4297 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4302 else if (regset->type == GENERAL_REGS)
4303 saw_general_regs = 1;
4304 regset->store_function (regcache, buf);
4308 if (saw_general_regs)
4315 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4316 struct regcache *regcache)
4318 struct regset_info *regset;
4319 int saw_general_regs = 0;
4323 regset = regsets_info->regsets;
4325 pid = lwpid_of (current_thread);
4326 while (regset->size >= 0)
4331 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4337 buf = xmalloc (regset->size);
4339 /* First fill the buffer with the current register set contents,
4340 in case there are any items in the kernel's regset that are
4341 not in gdbserver's regcache. */
4343 nt_type = regset->nt_type;
4347 iov.iov_len = regset->size;
4348 data = (void *) &iov;
4354 res = ptrace (regset->get_request, pid,
4355 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4357 res = ptrace (regset->get_request, pid, data, nt_type);
4362 /* Then overlay our cached registers on that. */
4363 regset->fill_function (regcache, buf);
4365 /* Only now do we write the register set. */
4367 res = ptrace (regset->set_request, pid,
4368 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4370 res = ptrace (regset->set_request, pid, data, nt_type);
4378 /* If we get EIO on a regset, do not try it again for
4379 this process mode. */
4380 disable_regset (regsets_info, regset);
4384 else if (errno == ESRCH)
4386 /* At this point, ESRCH should mean the process is
4387 already gone, in which case we simply ignore attempts
4388 to change its registers. See also the related
4389 comment in linux_resume_one_lwp. */
4395 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4398 else if (regset->type == GENERAL_REGS)
4399 saw_general_regs = 1;
4403 if (saw_general_regs)
4409 #else /* !HAVE_LINUX_REGSETS */
4411 #define use_linux_regsets 0
4412 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4413 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4417 /* Return 1 if register REGNO is supported by one of the regset ptrace
4418 calls or 0 if it has to be transferred individually. */
4421 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4423 unsigned char mask = 1 << (regno % 8);
4424 size_t index = regno / 8;
4426 return (use_linux_regsets
4427 && (regs_info->regset_bitmap == NULL
4428 || (regs_info->regset_bitmap[index] & mask) != 0));
4431 #ifdef HAVE_LINUX_USRREGS
4434 register_addr (const struct usrregs_info *usrregs, int regnum)
4438 if (regnum < 0 || regnum >= usrregs->num_regs)
4439 error ("Invalid register number %d.", regnum);
4441 addr = usrregs->regmap[regnum];
4446 /* Fetch one register. */
4448 fetch_register (const struct usrregs_info *usrregs,
4449 struct regcache *regcache, int regno)
4456 if (regno >= usrregs->num_regs)
4458 if ((*the_low_target.cannot_fetch_register) (regno))
4461 regaddr = register_addr (usrregs, regno);
4465 size = ((register_size (regcache->tdesc, regno)
4466 + sizeof (PTRACE_XFER_TYPE) - 1)
4467 & -sizeof (PTRACE_XFER_TYPE));
4468 buf = alloca (size);
4470 pid = lwpid_of (current_thread);
4471 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4474 *(PTRACE_XFER_TYPE *) (buf + i) =
4475 ptrace (PTRACE_PEEKUSER, pid,
4476 /* Coerce to a uintptr_t first to avoid potential gcc warning
4477 of coercing an 8 byte integer to a 4 byte pointer. */
4478 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4479 regaddr += sizeof (PTRACE_XFER_TYPE);
4481 error ("reading register %d: %s", regno, strerror (errno));
4484 if (the_low_target.supply_ptrace_register)
4485 the_low_target.supply_ptrace_register (regcache, regno, buf);
4487 supply_register (regcache, regno, buf);
4490 /* Store one register. */
4492 store_register (const struct usrregs_info *usrregs,
4493 struct regcache *regcache, int regno)
4500 if (regno >= usrregs->num_regs)
4502 if ((*the_low_target.cannot_store_register) (regno))
4505 regaddr = register_addr (usrregs, regno);
4509 size = ((register_size (regcache->tdesc, regno)
4510 + sizeof (PTRACE_XFER_TYPE) - 1)
4511 & -sizeof (PTRACE_XFER_TYPE));
4512 buf = alloca (size);
4513 memset (buf, 0, size);
4515 if (the_low_target.collect_ptrace_register)
4516 the_low_target.collect_ptrace_register (regcache, regno, buf);
4518 collect_register (regcache, regno, buf);
4520 pid = lwpid_of (current_thread);
4521 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4524 ptrace (PTRACE_POKEUSER, pid,
4525 /* Coerce to a uintptr_t first to avoid potential gcc warning
4526 about coercing an 8 byte integer to a 4 byte pointer. */
4527 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4528 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4531 /* At this point, ESRCH should mean the process is
4532 already gone, in which case we simply ignore attempts
4533 to change its registers. See also the related
4534 comment in linux_resume_one_lwp. */
4538 if ((*the_low_target.cannot_store_register) (regno) == 0)
4539 error ("writing register %d: %s", regno, strerror (errno));
4541 regaddr += sizeof (PTRACE_XFER_TYPE);
4545 /* Fetch all registers, or just one, from the child process.
4546 If REGNO is -1, do this for all registers, skipping any that are
4547 assumed to have been retrieved by regsets_fetch_inferior_registers,
4548 unless ALL is non-zero.
4549 Otherwise, REGNO specifies which register (so we can save time). */
4551 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4552 struct regcache *regcache, int regno, int all)
4554 struct usrregs_info *usr = regs_info->usrregs;
4558 for (regno = 0; regno < usr->num_regs; regno++)
4559 if (all || !linux_register_in_regsets (regs_info, regno))
4560 fetch_register (usr, regcache, regno);
4563 fetch_register (usr, regcache, regno);
4566 /* Store our register values back into the inferior.
4567 If REGNO is -1, do this for all registers, skipping any that are
4568 assumed to have been saved by regsets_store_inferior_registers,
4569 unless ALL is non-zero.
4570 Otherwise, REGNO specifies which register (so we can save time). */
4572 usr_store_inferior_registers (const struct regs_info *regs_info,
4573 struct regcache *regcache, int regno, int all)
4575 struct usrregs_info *usr = regs_info->usrregs;
4579 for (regno = 0; regno < usr->num_regs; regno++)
4580 if (all || !linux_register_in_regsets (regs_info, regno))
4581 store_register (usr, regcache, regno);
4584 store_register (usr, regcache, regno);
4587 #else /* !HAVE_LINUX_USRREGS */
4589 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4590 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4596 linux_fetch_registers (struct regcache *regcache, int regno)
4600 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4604 if (the_low_target.fetch_register != NULL
4605 && regs_info->usrregs != NULL)
4606 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4607 (*the_low_target.fetch_register) (regcache, regno);
4609 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4610 if (regs_info->usrregs != NULL)
4611 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4615 if (the_low_target.fetch_register != NULL
4616 && (*the_low_target.fetch_register) (regcache, regno))
4619 use_regsets = linux_register_in_regsets (regs_info, regno);
4621 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4623 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4624 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4629 linux_store_registers (struct regcache *regcache, int regno)
4633 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4637 all = regsets_store_inferior_registers (regs_info->regsets_info,
4639 if (regs_info->usrregs != NULL)
4640 usr_store_inferior_registers (regs_info, regcache, regno, all);
4644 use_regsets = linux_register_in_regsets (regs_info, regno);
4646 all = regsets_store_inferior_registers (regs_info->regsets_info,
4648 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4649 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4654 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4655 to debugger memory starting at MYADDR. */
4658 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4660 int pid = lwpid_of (current_thread);
4661 register PTRACE_XFER_TYPE *buffer;
4662 register CORE_ADDR addr;
4669 /* Try using /proc. Don't bother for one word. */
4670 if (len >= 3 * sizeof (long))
4674 /* We could keep this file open and cache it - possibly one per
4675 thread. That requires some juggling, but is even faster. */
4676 sprintf (filename, "/proc/%d/mem", pid);
4677 fd = open (filename, O_RDONLY | O_LARGEFILE);
4681 /* If pread64 is available, use it. It's faster if the kernel
4682 supports it (only one syscall), and it's 64-bit safe even on
4683 32-bit platforms (for instance, SPARC debugging a SPARC64
4686 bytes = pread64 (fd, myaddr, len, memaddr);
4689 if (lseek (fd, memaddr, SEEK_SET) != -1)
4690 bytes = read (fd, myaddr, len);
4697 /* Some data was read, we'll try to get the rest with ptrace. */
4707 /* Round starting address down to longword boundary. */
4708 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4709 /* Round ending address up; get number of longwords that makes. */
4710 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4711 / sizeof (PTRACE_XFER_TYPE));
4712 /* Allocate buffer of that many longwords. */
4713 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4715 /* Read all the longwords */
4717 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4719 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4720 about coercing an 8 byte integer to a 4 byte pointer. */
4721 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4722 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4723 (PTRACE_TYPE_ARG4) 0);
4729 /* Copy appropriate bytes out of the buffer. */
4732 i *= sizeof (PTRACE_XFER_TYPE);
4733 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4735 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4742 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4743 memory at MEMADDR. On failure (cannot write to the inferior)
4744 returns the value of errno. Always succeeds if LEN is zero. */
4747 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4750 /* Round starting address down to longword boundary. */
4751 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4752 /* Round ending address up; get number of longwords that makes. */
4754 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4755 / sizeof (PTRACE_XFER_TYPE);
4757 /* Allocate buffer of that many longwords. */
4758 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4759 alloca (count * sizeof (PTRACE_XFER_TYPE));
4761 int pid = lwpid_of (current_thread);
4765 /* Zero length write always succeeds. */
4771 /* Dump up to four bytes. */
4772 unsigned int val = * (unsigned int *) myaddr;
4778 val = val & 0xffffff;
4779 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4780 val, (long)memaddr);
4783 /* Fill start and end extra bytes of buffer with existing memory data. */
4786 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4787 about coercing an 8 byte integer to a 4 byte pointer. */
4788 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4789 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4790 (PTRACE_TYPE_ARG4) 0);
4798 = ptrace (PTRACE_PEEKTEXT, pid,
4799 /* Coerce to a uintptr_t first to avoid potential gcc warning
4800 about coercing an 8 byte integer to a 4 byte pointer. */
4801 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4802 * sizeof (PTRACE_XFER_TYPE)),
4803 (PTRACE_TYPE_ARG4) 0);
4808 /* Copy data to be written over corresponding part of buffer. */
4810 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4813 /* Write the entire buffer. */
4815 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4818 ptrace (PTRACE_POKETEXT, pid,
4819 /* Coerce to a uintptr_t first to avoid potential gcc warning
4820 about coercing an 8 byte integer to a 4 byte pointer. */
4821 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4822 (PTRACE_TYPE_ARG4) buffer[i]);
4831 linux_look_up_symbols (void)
4833 #ifdef USE_THREAD_DB
4834 struct process_info *proc = current_process ();
4836 if (proc->private->thread_db != NULL)
4839 /* If the kernel supports tracing clones, then we don't need to
4840 use the magic thread event breakpoint to learn about
4842 thread_db_init (!linux_supports_traceclone ());
4847 linux_request_interrupt (void)
4849 extern unsigned long signal_pid;
4851 if (!ptid_equal (cont_thread, null_ptid)
4852 && !ptid_equal (cont_thread, minus_one_ptid))
4856 lwpid = lwpid_of (current_thread);
4857 kill_lwp (lwpid, SIGINT);
4860 kill_lwp (signal_pid, SIGINT);
4863 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4864 to debugger memory starting at MYADDR. */
4867 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4869 char filename[PATH_MAX];
4871 int pid = lwpid_of (current_thread);
4873 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4875 fd = open (filename, O_RDONLY);
4879 if (offset != (CORE_ADDR) 0
4880 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4883 n = read (fd, myaddr, len);
4890 /* These breakpoint and watchpoint related wrapper functions simply
4891 pass on the function call if the target has registered a
4892 corresponding function. */
4895 linux_supports_z_point_type (char z_type)
4897 return (the_low_target.supports_z_point_type != NULL
4898 && the_low_target.supports_z_point_type (z_type));
4902 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4903 int size, struct raw_breakpoint *bp)
4905 if (the_low_target.insert_point != NULL)
4906 return the_low_target.insert_point (type, addr, size, bp);
4908 /* Unsupported (see target.h). */
4913 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4914 int size, struct raw_breakpoint *bp)
4916 if (the_low_target.remove_point != NULL)
4917 return the_low_target.remove_point (type, addr, size, bp);
4919 /* Unsupported (see target.h). */
4924 linux_stopped_by_watchpoint (void)
4926 struct lwp_info *lwp = get_thread_lwp (current_thread);
4928 return lwp->stopped_by_watchpoint;
4932 linux_stopped_data_address (void)
4934 struct lwp_info *lwp = get_thread_lwp (current_thread);
4936 return lwp->stopped_data_address;
4939 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4940 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4941 && defined(PT_TEXT_END_ADDR)
4943 /* This is only used for targets that define PT_TEXT_ADDR,
4944 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4945 the target has different ways of acquiring this information, like
4948 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4949 to tell gdb about. */
4952 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4954 unsigned long text, text_end, data;
4955 int pid = lwpid_of (get_thread_lwp (current_thread));
4959 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4960 (PTRACE_TYPE_ARG4) 0);
4961 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4962 (PTRACE_TYPE_ARG4) 0);
4963 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4964 (PTRACE_TYPE_ARG4) 0);
4968 /* Both text and data offsets produced at compile-time (and so
4969 used by gdb) are relative to the beginning of the program,
4970 with the data segment immediately following the text segment.
4971 However, the actual runtime layout in memory may put the data
4972 somewhere else, so when we send gdb a data base-address, we
4973 use the real data base address and subtract the compile-time
4974 data base-address from it (which is just the length of the
4975 text segment). BSS immediately follows data in both
4978 *data_p = data - (text_end - text);
4987 linux_qxfer_osdata (const char *annex,
4988 unsigned char *readbuf, unsigned const char *writebuf,
4989 CORE_ADDR offset, int len)
4991 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4994 /* Convert a native/host siginfo object, into/from the siginfo in the
4995 layout of the inferiors' architecture. */
4998 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5002 if (the_low_target.siginfo_fixup != NULL)
5003 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5005 /* If there was no callback, or the callback didn't do anything,
5006 then just do a straight memcpy. */
5010 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5012 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5017 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5018 unsigned const char *writebuf, CORE_ADDR offset, int len)
5022 char inf_siginfo[sizeof (siginfo_t)];
5024 if (current_thread == NULL)
5027 pid = lwpid_of (current_thread);
5030 debug_printf ("%s siginfo for lwp %d.\n",
5031 readbuf != NULL ? "Reading" : "Writing",
5034 if (offset >= sizeof (siginfo))
5037 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5040 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5041 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5042 inferior with a 64-bit GDBSERVER should look the same as debugging it
5043 with a 32-bit GDBSERVER, we need to convert it. */
5044 siginfo_fixup (&siginfo, inf_siginfo, 0);
5046 if (offset + len > sizeof (siginfo))
5047 len = sizeof (siginfo) - offset;
5049 if (readbuf != NULL)
5050 memcpy (readbuf, inf_siginfo + offset, len);
5053 memcpy (inf_siginfo + offset, writebuf, len);
5055 /* Convert back to ptrace layout before flushing it out. */
5056 siginfo_fixup (&siginfo, inf_siginfo, 1);
5058 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5065 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5066 so we notice when children change state; as the handler for the
5067 sigsuspend in my_waitpid. */
5070 sigchld_handler (int signo)
5072 int old_errno = errno;
5078 /* fprintf is not async-signal-safe, so call write
5080 if (write (2, "sigchld_handler\n",
5081 sizeof ("sigchld_handler\n") - 1) < 0)
5082 break; /* just ignore */
5086 if (target_is_async_p ())
5087 async_file_mark (); /* trigger a linux_wait */
5093 linux_supports_non_stop (void)
5099 linux_async (int enable)
5101 int previous = target_is_async_p ();
5104 debug_printf ("linux_async (%d), previous=%d\n",
5107 if (previous != enable)
5110 sigemptyset (&mask);
5111 sigaddset (&mask, SIGCHLD);
5113 sigprocmask (SIG_BLOCK, &mask, NULL);
5117 if (pipe (linux_event_pipe) == -1)
5119 linux_event_pipe[0] = -1;
5120 linux_event_pipe[1] = -1;
5121 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5123 warning ("creating event pipe failed.");
5127 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5128 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5130 /* Register the event loop handler. */
5131 add_file_handler (linux_event_pipe[0],
5132 handle_target_event, NULL);
5134 /* Always trigger a linux_wait. */
5139 delete_file_handler (linux_event_pipe[0]);
5141 close (linux_event_pipe[0]);
5142 close (linux_event_pipe[1]);
5143 linux_event_pipe[0] = -1;
5144 linux_event_pipe[1] = -1;
5147 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5154 linux_start_non_stop (int nonstop)
5156 /* Register or unregister from event-loop accordingly. */
5157 linux_async (nonstop);
5159 if (target_is_async_p () != (nonstop != 0))
5166 linux_supports_multi_process (void)
5172 linux_supports_disable_randomization (void)
5174 #ifdef HAVE_PERSONALITY
5182 linux_supports_agent (void)
5188 linux_supports_range_stepping (void)
5190 if (*the_low_target.supports_range_stepping == NULL)
5193 return (*the_low_target.supports_range_stepping) ();
5196 /* Enumerate spufs IDs for process PID. */
5198 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5204 struct dirent *entry;
5206 sprintf (path, "/proc/%ld/fd", pid);
5207 dir = opendir (path);
5212 while ((entry = readdir (dir)) != NULL)
5218 fd = atoi (entry->d_name);
5222 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5223 if (stat (path, &st) != 0)
5225 if (!S_ISDIR (st.st_mode))
5228 if (statfs (path, &stfs) != 0)
5230 if (stfs.f_type != SPUFS_MAGIC)
5233 if (pos >= offset && pos + 4 <= offset + len)
5235 *(unsigned int *)(buf + pos - offset) = fd;
5245 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5246 object type, using the /proc file system. */
5248 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5249 unsigned const char *writebuf,
5250 CORE_ADDR offset, int len)
5252 long pid = lwpid_of (current_thread);
5257 if (!writebuf && !readbuf)
5265 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5268 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5269 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5274 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5281 ret = write (fd, writebuf, (size_t) len);
5283 ret = read (fd, readbuf, (size_t) len);
5289 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5290 struct target_loadseg
5292 /* Core address to which the segment is mapped. */
5294 /* VMA recorded in the program header. */
5296 /* Size of this segment in memory. */
5300 # if defined PT_GETDSBT
5301 struct target_loadmap
5303 /* Protocol version number, must be zero. */
5305 /* Pointer to the DSBT table, its size, and the DSBT index. */
5306 unsigned *dsbt_table;
5307 unsigned dsbt_size, dsbt_index;
5308 /* Number of segments in this map. */
5310 /* The actual memory map. */
5311 struct target_loadseg segs[/*nsegs*/];
5313 # define LINUX_LOADMAP PT_GETDSBT
5314 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5315 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5317 struct target_loadmap
5319 /* Protocol version number, must be zero. */
5321 /* Number of segments in this map. */
5323 /* The actual memory map. */
5324 struct target_loadseg segs[/*nsegs*/];
5326 # define LINUX_LOADMAP PTRACE_GETFDPIC
5327 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5328 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5332 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5333 unsigned char *myaddr, unsigned int len)
5335 int pid = lwpid_of (current_thread);
5337 struct target_loadmap *data = NULL;
5338 unsigned int actual_length, copy_length;
5340 if (strcmp (annex, "exec") == 0)
5341 addr = (int) LINUX_LOADMAP_EXEC;
5342 else if (strcmp (annex, "interp") == 0)
5343 addr = (int) LINUX_LOADMAP_INTERP;
5347 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5353 actual_length = sizeof (struct target_loadmap)
5354 + sizeof (struct target_loadseg) * data->nsegs;
5356 if (offset < 0 || offset > actual_length)
5359 copy_length = actual_length - offset < len ? actual_length - offset : len;
5360 memcpy (myaddr, (char *) data + offset, copy_length);
5364 # define linux_read_loadmap NULL
5365 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5368 linux_process_qsupported (const char *query)
5370 if (the_low_target.process_qsupported != NULL)
5371 the_low_target.process_qsupported (query);
5375 linux_supports_tracepoints (void)
5377 if (*the_low_target.supports_tracepoints == NULL)
5380 return (*the_low_target.supports_tracepoints) ();
5384 linux_read_pc (struct regcache *regcache)
5386 if (the_low_target.get_pc == NULL)
5389 return (*the_low_target.get_pc) (regcache);
5393 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5395 gdb_assert (the_low_target.set_pc != NULL);
5397 (*the_low_target.set_pc) (regcache, pc);
5401 linux_thread_stopped (struct thread_info *thread)
5403 return get_thread_lwp (thread)->stopped;
5406 /* This exposes stop-all-threads functionality to other modules. */
5409 linux_pause_all (int freeze)
5411 stop_all_lwps (freeze, NULL);
5414 /* This exposes unstop-all-threads functionality to other gdbserver
5418 linux_unpause_all (int unfreeze)
5420 unstop_all_lwps (unfreeze, NULL);
5424 linux_prepare_to_access_memory (void)
5426 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5429 linux_pause_all (1);
5434 linux_done_accessing_memory (void)
5436 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5439 linux_unpause_all (1);
5443 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5444 CORE_ADDR collector,
5447 CORE_ADDR *jump_entry,
5448 CORE_ADDR *trampoline,
5449 ULONGEST *trampoline_size,
5450 unsigned char *jjump_pad_insn,
5451 ULONGEST *jjump_pad_insn_size,
5452 CORE_ADDR *adjusted_insn_addr,
5453 CORE_ADDR *adjusted_insn_addr_end,
5456 return (*the_low_target.install_fast_tracepoint_jump_pad)
5457 (tpoint, tpaddr, collector, lockaddr, orig_size,
5458 jump_entry, trampoline, trampoline_size,
5459 jjump_pad_insn, jjump_pad_insn_size,
5460 adjusted_insn_addr, adjusted_insn_addr_end,
5464 static struct emit_ops *
5465 linux_emit_ops (void)
5467 if (the_low_target.emit_ops != NULL)
5468 return (*the_low_target.emit_ops) ();
5474 linux_get_min_fast_tracepoint_insn_len (void)
5476 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5479 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5482 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5483 CORE_ADDR *phdr_memaddr, int *num_phdr)
5485 char filename[PATH_MAX];
5487 const int auxv_size = is_elf64
5488 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5489 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5491 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5493 fd = open (filename, O_RDONLY);
5499 while (read (fd, buf, auxv_size) == auxv_size
5500 && (*phdr_memaddr == 0 || *num_phdr == 0))
5504 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5506 switch (aux->a_type)
5509 *phdr_memaddr = aux->a_un.a_val;
5512 *num_phdr = aux->a_un.a_val;
5518 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5520 switch (aux->a_type)
5523 *phdr_memaddr = aux->a_un.a_val;
5526 *num_phdr = aux->a_un.a_val;
5534 if (*phdr_memaddr == 0 || *num_phdr == 0)
5536 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5537 "phdr_memaddr = %ld, phdr_num = %d",
5538 (long) *phdr_memaddr, *num_phdr);
5545 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5548 get_dynamic (const int pid, const int is_elf64)
5550 CORE_ADDR phdr_memaddr, relocation;
5552 unsigned char *phdr_buf;
5553 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5555 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5558 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5559 phdr_buf = alloca (num_phdr * phdr_size);
5561 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5564 /* Compute relocation: it is expected to be 0 for "regular" executables,
5565 non-zero for PIE ones. */
5567 for (i = 0; relocation == -1 && i < num_phdr; i++)
5570 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5572 if (p->p_type == PT_PHDR)
5573 relocation = phdr_memaddr - p->p_vaddr;
5577 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5579 if (p->p_type == PT_PHDR)
5580 relocation = phdr_memaddr - p->p_vaddr;
5583 if (relocation == -1)
5585 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5586 any real world executables, including PIE executables, have always
5587 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5588 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
5589 or present DT_DEBUG anyway (fpc binaries are statically linked).
5591 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5593 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5598 for (i = 0; i < num_phdr; i++)
5602 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5604 if (p->p_type == PT_DYNAMIC)
5605 return p->p_vaddr + relocation;
5609 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5611 if (p->p_type == PT_DYNAMIC)
5612 return p->p_vaddr + relocation;
5619 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5620 can be 0 if the inferior does not yet have the library list initialized.
5621 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5622 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5625 get_r_debug (const int pid, const int is_elf64)
5627 CORE_ADDR dynamic_memaddr;
5628 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5629 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5632 dynamic_memaddr = get_dynamic (pid, is_elf64);
5633 if (dynamic_memaddr == 0)
5636 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5640 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5641 #ifdef DT_MIPS_RLD_MAP
5645 unsigned char buf[sizeof (Elf64_Xword)];
5649 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5651 if (linux_read_memory (dyn->d_un.d_val,
5652 rld_map.buf, sizeof (rld_map.buf)) == 0)
5657 #endif /* DT_MIPS_RLD_MAP */
5659 if (dyn->d_tag == DT_DEBUG && map == -1)
5660 map = dyn->d_un.d_val;
5662 if (dyn->d_tag == DT_NULL)
5667 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5668 #ifdef DT_MIPS_RLD_MAP
5672 unsigned char buf[sizeof (Elf32_Word)];
5676 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5678 if (linux_read_memory (dyn->d_un.d_val,
5679 rld_map.buf, sizeof (rld_map.buf)) == 0)
5684 #endif /* DT_MIPS_RLD_MAP */
5686 if (dyn->d_tag == DT_DEBUG && map == -1)
5687 map = dyn->d_un.d_val;
5689 if (dyn->d_tag == DT_NULL)
5693 dynamic_memaddr += dyn_size;
5699 /* Read one pointer from MEMADDR in the inferior. */
5702 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5706 /* Go through a union so this works on either big or little endian
5707 hosts, when the inferior's pointer size is smaller than the size
5708 of CORE_ADDR. It is assumed the inferior's endianness is the
5709 same of the superior's. */
5712 CORE_ADDR core_addr;
5717 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5720 if (ptr_size == sizeof (CORE_ADDR))
5721 *ptr = addr.core_addr;
5722 else if (ptr_size == sizeof (unsigned int))
5725 gdb_assert_not_reached ("unhandled pointer size");
5730 struct link_map_offsets
5732 /* Offset and size of r_debug.r_version. */
5733 int r_version_offset;
5735 /* Offset and size of r_debug.r_map. */
5738 /* Offset to l_addr field in struct link_map. */
5741 /* Offset to l_name field in struct link_map. */
5744 /* Offset to l_ld field in struct link_map. */
5747 /* Offset to l_next field in struct link_map. */
5750 /* Offset to l_prev field in struct link_map. */
5754 /* Construct qXfer:libraries-svr4:read reply. */
5757 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5758 unsigned const char *writebuf,
5759 CORE_ADDR offset, int len)
5762 unsigned document_len;
5763 struct process_info_private *const priv = current_process ()->private;
5764 char filename[PATH_MAX];
5767 static const struct link_map_offsets lmo_32bit_offsets =
5769 0, /* r_version offset. */
5770 4, /* r_debug.r_map offset. */
5771 0, /* l_addr offset in link_map. */
5772 4, /* l_name offset in link_map. */
5773 8, /* l_ld offset in link_map. */
5774 12, /* l_next offset in link_map. */
5775 16 /* l_prev offset in link_map. */
5778 static const struct link_map_offsets lmo_64bit_offsets =
5780 0, /* r_version offset. */
5781 8, /* r_debug.r_map offset. */
5782 0, /* l_addr offset in link_map. */
5783 8, /* l_name offset in link_map. */
5784 16, /* l_ld offset in link_map. */
5785 24, /* l_next offset in link_map. */
5786 32 /* l_prev offset in link_map. */
5788 const struct link_map_offsets *lmo;
5789 unsigned int machine;
5791 CORE_ADDR lm_addr = 0, lm_prev = 0;
5792 int allocated = 1024;
5794 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5795 int header_done = 0;
5797 if (writebuf != NULL)
5799 if (readbuf == NULL)
5802 pid = lwpid_of (current_thread);
5803 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5804 is_elf64 = elf_64_file_p (filename, &machine);
5805 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5806 ptr_size = is_elf64 ? 8 : 4;
5808 while (annex[0] != '\0')
5814 sep = strchr (annex, '=');
5819 if (len == 5 && strncmp (annex, "start", 5) == 0)
5821 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5825 annex = strchr (sep, ';');
5832 annex = decode_address_to_semicolon (addrp, sep + 1);
5839 if (priv->r_debug == 0)
5840 priv->r_debug = get_r_debug (pid, is_elf64);
5842 /* We failed to find DT_DEBUG. Such situation will not change
5843 for this inferior - do not retry it. Report it to GDB as
5844 E01, see for the reasons at the GDB solib-svr4.c side. */
5845 if (priv->r_debug == (CORE_ADDR) -1)
5848 if (priv->r_debug != 0)
5850 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5851 (unsigned char *) &r_version,
5852 sizeof (r_version)) != 0
5855 warning ("unexpected r_debug version %d", r_version);
5857 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5858 &lm_addr, ptr_size) != 0)
5860 warning ("unable to read r_map from 0x%lx",
5861 (long) priv->r_debug + lmo->r_map_offset);
5866 document = xmalloc (allocated);
5867 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5868 p = document + strlen (document);
5871 && read_one_ptr (lm_addr + lmo->l_name_offset,
5872 &l_name, ptr_size) == 0
5873 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5874 &l_addr, ptr_size) == 0
5875 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5876 &l_ld, ptr_size) == 0
5877 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5878 &l_prev, ptr_size) == 0
5879 && read_one_ptr (lm_addr + lmo->l_next_offset,
5880 &l_next, ptr_size) == 0)
5882 unsigned char libname[PATH_MAX];
5884 if (lm_prev != l_prev)
5886 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5887 (long) lm_prev, (long) l_prev);
5891 /* Ignore the first entry even if it has valid name as the first entry
5892 corresponds to the main executable. The first entry should not be
5893 skipped if the dynamic loader was loaded late by a static executable
5894 (see solib-svr4.c parameter ignore_first). But in such case the main
5895 executable does not have PT_DYNAMIC present and this function already
5896 exited above due to failed get_r_debug. */
5899 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5904 /* Not checking for error because reading may stop before
5905 we've got PATH_MAX worth of characters. */
5907 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5908 libname[sizeof (libname) - 1] = '\0';
5909 if (libname[0] != '\0')
5911 /* 6x the size for xml_escape_text below. */
5912 size_t len = 6 * strlen ((char *) libname);
5917 /* Terminate `<library-list-svr4'. */
5922 while (allocated < p - document + len + 200)
5924 /* Expand to guarantee sufficient storage. */
5925 uintptr_t document_len = p - document;
5927 document = xrealloc (document, 2 * allocated);
5929 p = document + document_len;
5932 name = xml_escape_text ((char *) libname);
5933 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5934 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5935 name, (unsigned long) lm_addr,
5936 (unsigned long) l_addr, (unsigned long) l_ld);
5947 /* Empty list; terminate `<library-list-svr4'. */
5951 strcpy (p, "</library-list-svr4>");
5953 document_len = strlen (document);
5954 if (offset < document_len)
5955 document_len -= offset;
5958 if (len > document_len)
5961 memcpy (readbuf, document + offset, len);
5967 #ifdef HAVE_LINUX_BTRACE
5969 /* See to_enable_btrace target method. */
5971 static struct btrace_target_info *
5972 linux_low_enable_btrace (ptid_t ptid)
5974 struct btrace_target_info *tinfo;
5976 tinfo = linux_enable_btrace (ptid);
5980 struct thread_info *thread = find_thread_ptid (ptid);
5981 struct regcache *regcache = get_thread_regcache (thread, 0);
5983 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5989 /* See to_disable_btrace target method. */
5992 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5994 enum btrace_error err;
5996 err = linux_disable_btrace (tinfo);
5997 return (err == BTRACE_ERR_NONE ? 0 : -1);
6000 /* See to_read_btrace target method. */
6003 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6006 VEC (btrace_block_s) *btrace;
6007 struct btrace_block *block;
6008 enum btrace_error err;
6012 err = linux_read_btrace (&btrace, tinfo, type);
6013 if (err != BTRACE_ERR_NONE)
6015 if (err == BTRACE_ERR_OVERFLOW)
6016 buffer_grow_str0 (buffer, "E.Overflow.");
6018 buffer_grow_str0 (buffer, "E.Generic Error.");
6023 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6024 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6026 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
6027 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6028 paddress (block->begin), paddress (block->end));
6030 buffer_grow_str0 (buffer, "</btrace>\n");
6032 VEC_free (btrace_block_s, btrace);
6036 #endif /* HAVE_LINUX_BTRACE */
6038 static struct target_ops linux_target_ops = {
6039 linux_create_inferior,
6048 linux_fetch_registers,
6049 linux_store_registers,
6050 linux_prepare_to_access_memory,
6051 linux_done_accessing_memory,
6054 linux_look_up_symbols,
6055 linux_request_interrupt,
6057 linux_supports_z_point_type,
6060 linux_stopped_by_watchpoint,
6061 linux_stopped_data_address,
6062 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6063 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6064 && defined(PT_TEXT_END_ADDR)
6069 #ifdef USE_THREAD_DB
6070 thread_db_get_tls_address,
6075 hostio_last_error_from_errno,
6078 linux_supports_non_stop,
6080 linux_start_non_stop,
6081 linux_supports_multi_process,
6082 #ifdef USE_THREAD_DB
6083 thread_db_handle_monitor_command,
6087 linux_common_core_of_thread,
6089 linux_process_qsupported,
6090 linux_supports_tracepoints,
6093 linux_thread_stopped,
6097 linux_cancel_breakpoints,
6098 linux_stabilize_threads,
6099 linux_install_fast_tracepoint_jump_pad,
6101 linux_supports_disable_randomization,
6102 linux_get_min_fast_tracepoint_insn_len,
6103 linux_qxfer_libraries_svr4,
6104 linux_supports_agent,
6105 #ifdef HAVE_LINUX_BTRACE
6106 linux_supports_btrace,
6107 linux_low_enable_btrace,
6108 linux_low_disable_btrace,
6109 linux_low_read_btrace,
6116 linux_supports_range_stepping,
6120 linux_init_signals ()
6122 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6123 to find what the cancel signal actually is. */
6124 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6125 signal (__SIGRTMIN+1, SIG_IGN);
6129 #ifdef HAVE_LINUX_REGSETS
6131 initialize_regsets_info (struct regsets_info *info)
6133 for (info->num_regsets = 0;
6134 info->regsets[info->num_regsets].size >= 0;
6135 info->num_regsets++)
6141 initialize_low (void)
6143 struct sigaction sigchld_action;
6144 memset (&sigchld_action, 0, sizeof (sigchld_action));
6145 set_target_ops (&linux_target_ops);
6146 set_breakpoint_data (the_low_target.breakpoint,
6147 the_low_target.breakpoint_len);
6148 linux_init_signals ();
6149 linux_ptrace_init_warnings ();
6151 sigchld_action.sa_handler = sigchld_handler;
6152 sigemptyset (&sigchld_action.sa_mask);
6153 sigchld_action.sa_flags = SA_RESTART;
6154 sigaction (SIGCHLD, &sigchld_action, NULL);
6156 initialize_low_arch ();