/* GNU/Linux native-dependent code common to multiple platforms.
- Copyright (C) 2001-2016 Free Software Foundation, Inc.
+ Copyright (C) 2001-2018 Free Software Foundation, Inc.
This file is part of GDB.
/* The method to call, if any, when a new thread is attached. */
static void (*linux_nat_new_thread) (struct lwp_info *);
+/* The method to call, if any, when a thread is destroyed. */
+static void (*linux_nat_delete_thread) (struct arch_lwp_info *);
+
/* The method to call, if any, when a new fork is attached. */
static linux_nat_new_fork_ftype *linux_nat_new_fork;
};
struct simple_pid_list *stopped_pids;
+/* Whether target_thread_events is in effect. */
+static int report_thread_events;
+
/* Async mode support. */
/* The read/write ends of the pipe registered as waitable file in the
static int lwp_status_pending_p (struct lwp_info *lp);
-static int check_stopped_by_breakpoint (struct lwp_info *lp);
static int sigtrap_is_event (int status);
static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
+static void save_stop_reason (struct lwp_info *lp);
+
\f
/* LWP accessors. */
return lwp->stop_reason;
}
+/* See nat/linux-nat.h. Returns nonzero iff LWP is currently being
+ single-stepped (the lwp_info::step flag). */
+
+int
+lwp_is_stepping (struct lwp_info *lwp)
+{
+ return lwp->step;
+}
+
\f
/* Trivial list manipulation functions to keep track of a list of
new stopped processes. */
{
struct lwp_info *child_lp = NULL;
int status = W_STOPCODE (0);
- struct cleanup *old_chain;
int has_vforked;
ptid_t parent_ptid, child_ptid;
int parent_pid, child_pid;
child_pid = ptid_get_lwp (child_ptid);
/* We're already attached to the parent, by default. */
- old_chain = save_inferior_ptid ();
- inferior_ptid = child_ptid;
- child_lp = add_lwp (inferior_ptid);
+ child_lp = add_lwp (child_ptid);
child_lp->stopped = 1;
child_lp->last_resume_kind = resume_stop;
/* Detach new forked process? */
if (detach_fork)
{
- make_cleanup (delete_lwp_cleanup, child_lp);
+ struct cleanup *old_chain = make_cleanup (delete_lwp_cleanup,
+ child_lp);
if (linux_nat_prepare_to_resume != NULL)
linux_nat_prepare_to_resume (child_lp);
To work around this, single step the child process
once before detaching to clear the flags. */
+ /* Note that we consult the parent's architecture instead of
+ the child's because there's no inferior for the child at
+ this point. */
if (!gdbarch_software_single_step_p (target_thread_architecture
- (child_lp->ptid)))
+ (parent_ptid)))
{
linux_disable_event_reporting (child_pid);
if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
ptrace (PTRACE_DETACH, child_pid, 0, signo);
}
- /* Resets value of inferior_ptid to parent ptid. */
do_cleanups (old_chain);
}
else
{
+ scoped_restore save_inferior_ptid
+ = make_scoped_restore (&inferior_ptid);
+ inferior_ptid = child_ptid;
+
/* Let the thread_db layer learn about this new process. */
check_for_thread_db ();
}
- do_cleanups (old_chain);
-
if (has_vforked)
{
struct lwp_info *parent_lp;
static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
- int pid, int needed, int any_count,
- int table_size, int *table)
+ int pid, bool needed, int any_count,
+ gdb::array_view<const int> syscall_counts)
{
if (!linux_supports_tracesysgood ())
return 1;
/* On GNU/Linux, we ignore the arguments. It means that we only
enable the syscall catchpoints, but do not disable them.
- Also, we do not use the `table' information because we do not
+ Also, we do not use the `syscall_counts' information because we do not
filter system calls here. We let GDB do the logic for us. */
return 0;
}
-/* List of known LWPs. */
+/* List of known LWPs, keyed by LWP PID. This speeds up the common
+ case of mapping a PID returned from the kernel to our corresponding
+ lwp_info data structure. */
+static htab_t lwp_lwpid_htab;
+
+/* Calculate a hash from a lwp_info's LWP PID. Only the LWP id
+ participates in the hash, matching the equality function
+ lwp_lwpid_htab_eq. */
+
+static hashval_t
+lwp_info_hash (const void *ap)
+{
+ const struct lwp_info *lp = (struct lwp_info *) ap;
+ pid_t pid = ptid_get_lwp (lp->ptid);
+
+ return iterative_hash_object (pid, 0);
+}
+
+/* Equality function for the lwp_info hash table. Compares the LWP's
+ PID. Because only the LWP id is compared, a stack-allocated dummy
+ lwp_info with just its ptid filled in can be used as a lookup
+ key. */
+
+static int
+lwp_lwpid_htab_eq (const void *a, const void *b)
+{
+ const struct lwp_info *entry = (const struct lwp_info *) a;
+ const struct lwp_info *element = (const struct lwp_info *) b;
+
+ return ptid_get_lwp (entry->ptid) == ptid_get_lwp (element->ptid);
+}
+
+/* Create the lwp_lwpid_htab hash table. 100 is only an initial size
+ hint. The NULL del_f argument means the table never frees entries
+ itself; lwp_free is responsible for that. */
+
+static void
+lwp_lwpid_htab_create (void)
+{
+ lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
+}
+
+/* Add LP to the hash table. The assert catches an attempt to insert
+ a duplicate LWP id -- the slot for a new entry must be empty. */
+
+static void
+lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
+{
+ void **slot;
+
+ slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
+ gdb_assert (slot != NULL && *slot == NULL);
+ *slot = lp;
+}
+
+/* Head of doubly-linked list of known LWPs. Sorted by reverse
+ creation order. This order is assumed in some cases. E.g.,
+ reaping status after killing all lwps of a process: the leader LWP
+ must be reaped last. */
struct lwp_info *lwp_list;
+
+/* Add LP to sorted-by-reverse-creation-order doubly-linked list.
+ New LWPs are pushed at the head, keeping the list newest-first.
+ Note LP->prev is not cleared here; presumably the lwp_info is
+ zero-initialized at allocation -- confirm at the allocation
+ site. */
+
+static void
+lwp_list_add (struct lwp_info *lp)
+{
+ lp->next = lwp_list;
+ if (lwp_list != NULL)
+ lwp_list->prev = lp;
+ lwp_list = lp;
+}
+
+/* Remove LP from sorted-by-reverse-creation-order doubly-linked
+ list. LP's own prev/next pointers are left untouched; the caller
+ is expected to free LP afterwards. */
+
+static void
+lwp_list_remove (struct lwp_info *lp)
+{
+ /* Unlink LP from its neighbors, then fix up the list head if LP
+ was the first element. */
+ if (lp->next != NULL)
+ lp->next->prev = lp->prev;
+ if (lp->prev != NULL)
+ lp->prev->next = lp->next;
+ if (lp == lwp_list)
+ lwp_list = lp->next;
+}
+
\f
/* Original signal mask. */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
+static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
\f
static void
lwp_free (struct lwp_info *lp)
{
- xfree (lp->arch_private);
+ /* Let the arch specific bits release arch_lwp_info. */
+ if (linux_nat_delete_thread != NULL)
+ linux_nat_delete_thread (lp->arch_private);
+ else
+ gdb_assert (lp->arch_private == NULL);
+
xfree (lp);
}
-/* Remove all LWPs belong to PID from the lwp list. */
+/* Traversal function for purge_lwp_list. */
-static void
-purge_lwp_list (int pid)
+static int
+lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
- struct lwp_info *lp, *lpprev, *lpnext;
-
- lpprev = NULL;
+ struct lwp_info *lp = (struct lwp_info *) *slot;
+ int pid = *(int *) info;
- for (lp = lwp_list; lp; lp = lpnext)
+ if (ptid_get_pid (lp->ptid) == pid)
{
- lpnext = lp->next;
+ htab_clear_slot (lwp_lwpid_htab, slot);
+ lwp_list_remove (lp);
+ lwp_free (lp);
+ }
- if (ptid_get_pid (lp->ptid) == pid)
- {
- if (lp == lwp_list)
- lwp_list = lp->next;
- else
- lpprev->next = lp->next;
+ return 1;
+}
- lwp_free (lp);
- }
- else
- lpprev = lp;
- }
+/* Remove (and free, via lwp_lwpid_htab_remove_pid -> lwp_free) all
+ LWPs belonging to PID from the lwp list and the lwpid-keyed hash
+ table. */
+
+static void
+purge_lwp_list (int pid)
+{
+ htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}
/* Add the LWP specified by PTID to the list. PTID is the first LWP
lp->ptid = ptid;
lp->core = -1;
- lp->next = lwp_list;
- lwp_list = lp;
+ /* Add to sorted-by-reverse-creation-order list. */
+ lwp_list_add (lp);
+
+ /* Add to keyed-by-pid htab. */
+ lwp_lwpid_htab_add_lwp (lp);
return lp;
}
static void
delete_lwp (ptid_t ptid)
{
- struct lwp_info *lp, *lpprev;
+ struct lwp_info *lp;
+ void **slot;
+ struct lwp_info dummy;
- lpprev = NULL;
+ dummy.ptid = ptid;
+ slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
+ if (slot == NULL)
+ return;
- for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
- if (ptid_equal (lp->ptid, ptid))
- break;
+ lp = *(struct lwp_info **) slot;
+ gdb_assert (lp != NULL);
- if (!lp)
- return;
+ htab_clear_slot (lwp_lwpid_htab, slot);
- if (lpprev)
- lpprev->next = lp->next;
- else
- lwp_list = lp->next;
+ /* Remove from sorted-by-creation-order list. */
+ lwp_list_remove (lp);
+ /* Release. */
lwp_free (lp);
}
{
struct lwp_info *lp;
int lwp;
+ struct lwp_info dummy;
if (ptid_lwp_p (ptid))
lwp = ptid_get_lwp (ptid);
else
lwp = ptid_get_pid (ptid);
- for (lp = lwp_list; lp; lp = lp->next)
- if (lwp == ptid_get_lwp (lp->ptid))
- return lp;
-
- return NULL;
+ dummy.ptid = ptid_build (0, lwp, 0);
+ lp = (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
+ return lp;
}
/* See nat/linux-nat.h. */
Returns a wait status for that LWP, to cache. */
static int
-linux_nat_post_attach_wait (ptid_t ptid, int first, int *signalled)
+linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
pid_t new_pid, pid = ptid_get_lwp (ptid);
int status;
static void
linux_nat_create_inferior (struct target_ops *ops,
- char *exec_file, char *allargs, char **env,
- int from_tty)
+ const char *exec_file, const std::string &allargs,
+ char **env, int from_tty)
{
- struct cleanup *restore_personality
- = maybe_disable_address_space_randomization (disable_randomization);
+ maybe_disable_address_space_randomization restore_personality
+ (disable_randomization);
/* The fork_child mechanism is synchronous and calls target_wait, so
we have to mask the async mode. */
linux_nat_pass_signals (ops, 0, NULL);
linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
-
- do_cleanups (restore_personality);
}
/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
}
else
{
+ std::string reason
+ = linux_ptrace_attach_fail_reason_string (ptid, err);
+
warning (_("Cannot attach to lwp %d: %s"),
- lwpid,
- linux_ptrace_attach_fail_reason_string (ptid,
- err));
+ lwpid, reason.c_str ());
}
}
else
/* We need to wait for a stop before being able to make the
next ptrace call on this LWP. */
lp->must_set_ptrace_flags = 1;
+
+ /* So that wait collects the SIGSTOP. */
+ lp->resumed = 1;
+
+ /* Also add the LWP to gdb's thread list, in case a
+ matching libthread_db is not found (or the process uses
+ raw clone). */
+ add_thread (lp->ptid);
+ set_running (lp->ptid, 1);
+ set_executing (lp->ptid, 1);
}
return 1;
CATCH (ex, RETURN_MASK_ERROR)
{
pid_t pid = parse_pid_to_attach (args);
- struct buffer buffer;
- char *message, *buffer_s;
-
- message = xstrdup (ex.message);
- make_cleanup (xfree, message);
-
- buffer_init (&buffer);
- linux_ptrace_attach_fail_reason (pid, &buffer);
+ std::string reason = linux_ptrace_attach_fail_reason (pid);
- buffer_grow_str0 (&buffer, "");
- buffer_s = buffer_finish (&buffer);
- make_cleanup (xfree, buffer_s);
-
- if (*buffer_s != '\0')
- throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
+ if (!reason.empty ())
+ throw_error (ex.error, "warning: %s\n%s", reason.c_str (), ex.message);
else
- throw_error (ex.error, "%s", message);
+ throw_error (ex.error, "%s", ex.message);
}
END_CATCH
/* Add the initial process as the first LWP to the list. */
lp = add_initial_lwp (ptid);
- status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->signalled);
+ status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
if (!WIFSTOPPED (status))
{
if (WIFEXITED (status))
{
int exit_code = WEXITSTATUS (status);
- target_terminal_ours ();
- target_mourn_inferior ();
+ target_terminal::ours ();
+ target_mourn_inferior (inferior_ptid);
if (exit_code == 0)
error (_("Unable to attach: program exited normally."));
else
{
enum gdb_signal signo;
- target_terminal_ours ();
- target_mourn_inferior ();
+ target_terminal::ours ();
+ target_mourn_inferior (inferior_ptid);
signo = gdb_signal_from_host (WTERMSIG (status));
error (_("Unable to attach: program terminated with signal "
target_async (1);
}
-/* Get pending status of LP. */
+/* Get pending signal of THREAD as a host signal number, for detaching
+ purposes. This is the signal the thread last stopped for, which we
+ need to deliver to the thread when detaching, otherwise, it'd be
+ suppressed/lost. */
+
static int
-get_pending_status (struct lwp_info *lp, int *status)
+get_detach_signal (struct lwp_info *lp)
{
enum gdb_signal signo = GDB_SIGNAL_0;
{
struct thread_info *tp = find_thread_ptid (lp->ptid);
- signo = tp->suspend.stop_signal;
+ if (tp->suspend.waitstatus_pending_p)
+ signo = tp->suspend.waitstatus.value.sig;
+ else
+ signo = tp->suspend.stop_signal;
}
else if (!target_is_non_stop_p ())
{
}
}
- *status = 0;
-
if (signo == GDB_SIGNAL_0)
{
if (debug_linux_nat)
}
else
{
- *status = W_STOPCODE (gdb_signal_to_host (signo));
-
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"GPT: lwp %s has pending signal %s\n",
target_pid_to_str (lp->ptid),
gdb_signal_to_string (signo));
+
+ return gdb_signal_to_host (signo);
}
return 0;
}
-static int
-detach_callback (struct lwp_info *lp, void *data)
+/* Detach from LP. If SIGNO_P is non-NULL, then it points to the
+ signal number that should be passed to the LWP when detaching.
+ Otherwise pass any pending signal the LWP may have, if any. */
+
+static void
+detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
+ int lwpid = ptid_get_lwp (lp->ptid);
+ int signo;
+
gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
if (debug_linux_nat && lp->status)
"DC: Sending SIGCONT to %s\n",
target_pid_to_str (lp->ptid));
- kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
+ kill_lwp (lwpid, SIGCONT);
lp->signalled = 0;
}
- /* We don't actually detach from the LWP that has an id equal to the
- overall process id just yet. */
- if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
+ if (signo_p == NULL)
{
- int status = 0;
-
/* Pass on any pending signal for this LWP. */
- get_pending_status (lp, &status);
+ signo = get_detach_signal (lp);
+ }
+ else
+ signo = *signo_p;
+ /* Preparing to resume may try to write registers, and fail if the
+ lwp is zombie. If that happens, ignore the error. We'll handle
+ it below, when detach fails with ESRCH. */
+ TRY
+ {
if (linux_nat_prepare_to_resume != NULL)
linux_nat_prepare_to_resume (lp);
- errno = 0;
- if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
- WSTOPSIG (status)) < 0)
- error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
- safe_strerror (errno));
+ }
+ CATCH (ex, RETURN_MASK_ERROR)
+ {
+ if (!check_ptrace_stopped_lwp_gone (lp))
+ throw_exception (ex);
+ }
+ END_CATCH
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "PTRACE_DETACH (%s, %s, 0) (OK)\n",
- target_pid_to_str (lp->ptid),
- strsignal (WSTOPSIG (status)));
+ if (ptrace (PTRACE_DETACH, lwpid, 0, signo) < 0)
+ {
+ int save_errno = errno;
- delete_lwp (lp->ptid);
+ /* We know the thread exists, so ESRCH must mean the lwp is
+ zombie. This can happen if one of the already-detached
+ threads exits the whole thread group. In that case we're
+ still attached, and must reap the lwp. */
+ if (save_errno == ESRCH)
+ {
+ int ret, status;
+
+ ret = my_waitpid (lwpid, &status, __WALL);
+ if (ret == -1)
+ {
+ warning (_("Couldn't reap LWP %d while detaching: %s"),
+ lwpid, strerror (errno));
+ }
+ else if (!WIFEXITED (status) && !WIFSIGNALED (status))
+ {
+ warning (_("Reaping LWP %d while detaching "
+ "returned unexpected status 0x%x"),
+ lwpid, status);
+ }
+ }
+ else
+ {
+ error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
+ safe_strerror (save_errno));
+ }
+ }
+ else if (debug_linux_nat)
+ {
+ fprintf_unfiltered (gdb_stdlog,
+ "PTRACE_DETACH (%s, %s, 0) (OK)\n",
+ target_pid_to_str (lp->ptid),
+ strsignal (signo));
}
+ delete_lwp (lp->ptid);
+}
+
+static int
+detach_callback (struct lwp_info *lp, void *data)
+{
+ /* We don't actually detach from the thread group leader just yet.
+ If the thread group exits, we must reap the zombie clone lwps
+ before we're able to reap the leader. */
+ if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
+ detach_one_lwp (lp, NULL);
return 0;
}
static void
-linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
+linux_nat_detach (struct target_ops *ops, inferior *inf, int from_tty)
{
- int pid;
- int status;
struct lwp_info *main_lwp;
-
- pid = ptid_get_pid (inferior_ptid);
+ int pid = inf->pid;
/* Don't unregister from the event loop, as there may be other
inferiors running. */
iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
/* Only the initial process should be left right now. */
- gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
+ gdb_assert (num_lwps (pid) == 1);
main_lwp = find_lwp_pid (pid_to_ptid (pid));
- /* Pass on any pending signal for the last LWP. */
- if ((args == NULL || *args == '\0')
- && get_pending_status (main_lwp, &status) != -1
- && WIFSTOPPED (status))
- {
- char *tem;
-
- /* Put the signal number in ARGS so that inf_ptrace_detach will
- pass it along with PTRACE_DETACH. */
- tem = (char *) alloca (8);
- xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
- args = tem;
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "LND: Sending signal %s to %s\n",
- args,
- target_pid_to_str (main_lwp->ptid));
- }
-
- if (linux_nat_prepare_to_resume != NULL)
- linux_nat_prepare_to_resume (main_lwp);
- delete_lwp (main_lwp->ptid);
-
if (forks_exist_p ())
{
/* Multi-fork case. The current inferior_ptid is being detached
from, but there are other viable forks to debug. Detach from
the current fork, and context-switch to the first
available. */
- linux_fork_detach (args, from_tty);
+ linux_fork_detach (from_tty);
}
else
- linux_ops->to_detach (ops, args, from_tty);
+ {
+ target_announce_detach (from_tty);
+
+ /* Pass on any pending signal for the last LWP. */
+ int signo = get_detach_signal (main_lwp);
+
+ detach_one_lwp (main_lwp, &signo);
+
+ inf_ptrace_detach_success (ops, inf);
+ }
}
/* Resume execution of the inferior process. If STEP is nonzero,
status. Note that we must not throw after this is cleared,
otherwise handle_zombie_lwp_error would get confused. */
lp->stopped = 0;
+ lp->core = -1;
lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
registers_changed_ptid (lp->ptid);
}
status_to_str (status));
new_lp->status = status;
}
+ else if (report_thread_events)
+ {
+ new_lp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
+ new_lp->status = status;
+ }
return 1;
}
_("unknown ptrace event %d"), event);
}
+/* Suspend waiting for a signal. We're mostly interested in
+ SIGCHLD/SIGINT. */
+
+static void
+wait_for_signal ()
+{
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog, "linux-nat: about to sigsuspend\n");
+ /* sigsuspend atomically installs SUSPEND_MASK as the signal mask
+ and waits; presumably SUSPEND_MASK leaves SIGCHLD deliverable --
+ see where SUSPEND_MASK is initialized. */
+ sigsuspend (&suspend_mask);
+
+ /* If the quit flag is set, it means that the user pressed Ctrl-C
+ and we're debugging a process that is running on a separate
+ terminal, so we must forward the Ctrl-C to the inferior. (If the
+ inferior is sharing GDB's terminal, then the Ctrl-C reaches the
+ inferior directly.) We must do this here because functions that
+ need to block waiting for a signal loop forever until there's an
+ event to report before returning back to the event loop. */
+ if (!target_terminal::is_ours ())
+ {
+ if (check_quit_flag ())
+ target_pass_ctrlc ();
+ }
+}
+
/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
exited. */
linux_nat_wait_1 and there if we get called my_waitpid gets called
again before it gets to sigsuspend so we can safely let the handlers
get executed here. */
-
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
- sigsuspend (&suspend_mask);
+ wait_for_signal ();
}
restore_child_signals_mask (&prev_mask);
/* Check if the thread has exited. */
if (WIFEXITED (status) || WIFSIGNALED (status))
{
- if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
+ if (report_thread_events
+ || ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
{
if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "WL: Process %d exited.\n",
+ fprintf_unfiltered (gdb_stdlog, "WL: LWP %d exited.\n",
ptid_get_pid (lp->ptid));
- /* This is the leader exiting, it means the whole
+ /* If this is the leader exiting, it means the whole
process is gone. Store the status to report to the
core. Store it in lp->waitstatus, because lp->status
would be ambiguous (W_EXITCODE(0,0) == 0). */
static int
check_stopped_by_watchpoint (struct lwp_info *lp)
{
- struct cleanup *old_chain;
-
if (linux_ops->to_stopped_by_watchpoint == NULL)
return 0;
- old_chain = save_inferior_ptid ();
+ scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
inferior_ptid = lp->ptid;
if (linux_ops->to_stopped_by_watchpoint (linux_ops))
lp->stopped_data_address_p = 0;
}
- do_cleanups (old_chain);
-
return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
-/* Called when the LWP stopped for a trap that could be explained by a
- watchpoint or a breakpoint. */
-
-static void
-save_sigtrap (struct lwp_info *lp)
-{
- gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
- gdb_assert (lp->status != 0);
-
- /* Check first if this was a SW/HW breakpoint before checking
- watchpoints, because at least s390 can't tell the data address of
- hardware watchpoint hits, and the kernel returns
- stopped-by-watchpoint as long as there's a watchpoint set. */
- if (linux_nat_status_is_event (lp->status))
- check_stopped_by_breakpoint (lp);
-
- /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
- or hardware watchpoint. Check which is which if we got
- TARGET_STOPPED_BY_HW_BREAKPOINT. */
- if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON
- || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
- check_stopped_by_watchpoint (lp);
-}
-
/* Returns true if the LWP had stopped for a watchpoint. */
static int
/* Save the sigtrap event. */
lp->status = status;
gdb_assert (lp->signalled);
- save_sigtrap (lp);
+ save_stop_reason (lp);
}
else
{
|| lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
{
struct regcache *regcache = get_thread_regcache (lp->ptid);
- struct gdbarch *gdbarch = get_regcache_arch (regcache);
CORE_ADDR pc;
int discard = 0;
}
#if !USE_SIGTRAP_SIGINFO
- else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ else if (!breakpoint_inserted_here_p (regcache->aspace (), pc))
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
return 0;
}
-/* Called when the LWP got a signal/trap that could be explained by a
- software or hardware breakpoint. */
+/* Called when the LWP stopped for a signal/trap. If it stopped for a
+ trap check what caused it (breakpoint, watchpoint, trace, etc.),
+ and save the result in the LWP's stop_reason field. If it stopped
+ for a breakpoint, decrement the PC if necessary on the lwp's
+ architecture. */
-static int
-check_stopped_by_breakpoint (struct lwp_info *lp)
+static void
+save_stop_reason (struct lwp_info *lp)
{
- /* Arrange for a breakpoint to be hit again later. We don't keep
- the SIGTRAP status and don't forward the SIGTRAP signal to the
- LWP. We will handle the current event, eventually we will resume
- this LWP, and this breakpoint will trap again.
-
- If we do not do this, then we run the risk that the user will
- delete or disable the breakpoint, but the LWP will have already
- tripped on it. */
-
- struct regcache *regcache = get_thread_regcache (lp->ptid);
- struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ struct regcache *regcache;
+ struct gdbarch *gdbarch;
CORE_ADDR pc;
CORE_ADDR sw_bp_pc;
#if USE_SIGTRAP_SIGINFO
siginfo_t siginfo;
#endif
+ gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
+ gdb_assert (lp->status != 0);
+
+ if (!linux_nat_status_is_event (lp->status))
+ return;
+
+ regcache = get_thread_regcache (lp->ptid);
+ gdbarch = regcache->arch ();
+
pc = regcache_read_pc (regcache);
sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
{
if (siginfo.si_signo == SIGTRAP)
{
- if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
+ if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
+ && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
{
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "CSBB: %s stopped by software "
- "breakpoint\n",
- target_pid_to_str (lp->ptid));
-
- /* Back up the PC if necessary. */
- if (pc != sw_bp_pc)
- regcache_write_pc (regcache, sw_bp_pc);
-
- lp->stop_pc = sw_bp_pc;
+ /* The si_code is ambiguous on this arch -- check debug
+ registers. */
+ if (!check_stopped_by_watchpoint (lp))
+ lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
+ }
+ else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
+ {
+ /* If we determine the LWP stopped for a SW breakpoint,
+ trust it. Particularly don't check watchpoint
+ registers, because at least on s390, we'd find
+ stopped-by-watchpoint as long as there's a watchpoint
+ set. */
lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
- return 1;
}
- else if (siginfo.si_code == TRAP_HWBKPT)
+ else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
{
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog,
- "CSBB: %s stopped by hardware "
- "breakpoint/watchpoint\n",
- target_pid_to_str (lp->ptid));
-
- lp->stop_pc = pc;
- lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
- return 1;
+ /* This can indicate either a hardware breakpoint or
+ hardware watchpoint. Check debug registers. */
+ if (!check_stopped_by_watchpoint (lp))
+ lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
}
else if (siginfo.si_code == TRAP_TRACE)
{
fprintf_unfiltered (gdb_stdlog,
"CSBB: %s stopped by trace\n",
target_pid_to_str (lp->ptid));
+
+ /* We may have single stepped an instruction that
+ triggered a watchpoint. In that case, on some
+ architectures (such as x86), instead of TRAP_HWBKPT,
+ si_code indicates TRAP_TRACE, and we need to check
+ the debug registers separately. */
+ check_stopped_by_watchpoint (lp);
}
}
}
#else
if ((!lp->step || lp->stop_pc == sw_bp_pc)
- && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
+ && software_breakpoint_inserted_here_p (regcache->aspace (),
sw_bp_pc))
{
/* The LWP was either continued, or stepped a software
breakpoint instruction. */
+ lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
+ }
+
+ if (hardware_breakpoint_inserted_here_p (regcache->aspace (), pc))
+ lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
+
+ if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
+ check_stopped_by_watchpoint (lp);
+#endif
+
+ if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
+ {
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
"CSBB: %s stopped by software breakpoint\n",
if (pc != sw_bp_pc)
regcache_write_pc (regcache, sw_bp_pc);
- lp->stop_pc = sw_bp_pc;
- lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
- return 1;
+ /* Update this so we record the correct stop PC below. */
+ pc = sw_bp_pc;
}
-
- if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "CSBB: stopped by hardware breakpoint %s\n",
+ "CSBB: %s stopped by hardware breakpoint\n",
+ target_pid_to_str (lp->ptid));
+ }
+ else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
+ {
+ if (debug_linux_nat)
+ fprintf_unfiltered (gdb_stdlog,
+ "CSBB: %s stopped by hardware watchpoint\n",
target_pid_to_str (lp->ptid));
-
- lp->stop_pc = pc;
- lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
- return 1;
}
-#endif
- return 0;
+ lp->stop_pc = pc;
}
/* Check if the thread has exited. */
if (WIFEXITED (status) || WIFSIGNALED (status))
{
- if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
+ if (!report_thread_events
+ && num_lwps (ptid_get_pid (lp->ptid)) > 1)
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
resumed. */
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "Process %ld exited (resumed=%d)\n",
+ "LWP %ld exited (resumed=%d)\n",
ptid_get_lwp (lp->ptid), lp->resumed);
- /* This was the last lwp in the process. Since events are
- serialized to GDB core, we may not be able report this one
- right now, but GDB core and the other target layers will want
- to be notified about the exit code/signal, leave the status
- pending for the next time we're able to report it. */
-
/* Dead LWP's aren't expected to reported a pending sigstop. */
lp->signalled = 0;
/* An interesting event. */
gdb_assert (lp);
lp->status = status;
- save_sigtrap (lp);
+ save_stop_reason (lp);
return lp;
}
}
}
+/* Convenience function that is called when the kernel reports an exit
+ event. This decides whether to report the event to GDB as a
+ process exit event, a thread exit event, or to suppress the
+ event. Returns the ptid of the exited LWP. */
+
+static ptid_t
+filter_exit_event (struct lwp_info *event_child,
+ struct target_waitstatus *ourstatus)
+{
+ /* Save the id first; exit_lwp below may invalidate EVENT_CHILD. */
+ ptid_t ptid = event_child->ptid;
+
+ /* If other LWPs remain, this was a thread exit, not a process
+ exit: report it as such only if thread events were requested,
+ otherwise swallow it. If this was the last LWP, OURSTATUS (the
+ process exit event filled in by the caller) is left untouched. */
+ if (num_lwps (ptid_get_pid (ptid)) > 1)
+ {
+ if (report_thread_events)
+ ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
+ else
+ ourstatus->kind = TARGET_WAITKIND_IGNORE;
+
+ exit_lwp (event_child);
+ }
+
+ return ptid;
+}
+
static ptid_t
linux_nat_wait_1 (struct target_ops *ops,
ptid_t ptid, struct target_waitstatus *ourstatus,
gdb_assert (lp == NULL);
/* Block until we get an event reported with SIGCHLD. */
- if (debug_linux_nat)
- fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
- sigsuspend (&suspend_mask);
+ wait_for_signal ();
}
gdb_assert (lp);
&& !USE_SIGTRAP_SIGINFO)
{
struct regcache *regcache = get_thread_regcache (lp->ptid);
- struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ struct gdbarch *gdbarch = regcache->arch ();
int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
if (decr_pc != 0)
else
lp->core = linux_common_core_of_thread (lp->ptid);
+ if (ourstatus->kind == TARGET_WAITKIND_EXITED)
+ return filter_exit_event (lp, ourstatus);
+
return lp->ptid;
}
else
{
struct regcache *regcache = get_thread_regcache (lp->ptid);
- struct gdbarch *gdbarch = get_regcache_arch (regcache);
+ struct gdbarch *gdbarch = regcache->arch ();
TRY
{
immediately, and we're not waiting for this LWP. */
if (!ptid_match (lp->ptid, *wait_ptid_p))
{
- if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
+ if (breakpoint_inserted_here_p (regcache->aspace (), pc))
leave_stopped = 1;
}
return event_ptid;
}
-static int
-kill_callback (struct lwp_info *lp, void *data)
+/* Kill one LWP. */
+
+static void
+kill_one_lwp (pid_t pid)
{
/* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
errno = 0;
- kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
+ kill_lwp (pid, SIGKILL);
if (debug_linux_nat)
{
int save_errno = errno;
fprintf_unfiltered (gdb_stdlog,
- "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
- target_pid_to_str (lp->ptid),
+ "KC: kill (SIGKILL) %ld, 0, 0 (%s)\n", (long) pid,
save_errno ? safe_strerror (save_errno) : "OK");
}
/* Some kernels ignore even SIGKILL for processes under ptrace. */
errno = 0;
- ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
+ ptrace (PTRACE_KILL, pid, 0, 0);
if (debug_linux_nat)
{
int save_errno = errno;
fprintf_unfiltered (gdb_stdlog,
- "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
- target_pid_to_str (lp->ptid),
+ "KC: PTRACE_KILL %ld, 0, 0 (%s)\n", (long) pid,
save_errno ? safe_strerror (save_errno) : "OK");
}
-
- return 0;
}
-static int
-kill_wait_callback (struct lwp_info *lp, void *data)
+/* Wait for an LWP to die. */
+
+static void
+kill_wait_one_lwp (pid_t pid)
{
- pid_t pid;
+ pid_t res;
/* We must make sure that there are no pending events (delayed
SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
do
{
- pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WALL);
- if (pid != (pid_t) -1)
+ res = my_waitpid (pid, NULL, __WALL);
+ if (res != (pid_t) -1)
{
if (debug_linux_nat)
fprintf_unfiltered (gdb_stdlog,
- "KWC: wait %s received unknown.\n",
- target_pid_to_str (lp->ptid));
+ "KWC: wait %ld received unknown.\n",
+ (long) pid);
/* The Linux kernel sometimes fails to kill a thread
completely after PTRACE_KILL; that goes from the stop
point in do_fork out to the one in get_signal_to_deliver
and waits again. So kill it again. */
- kill_callback (lp, NULL);
+ kill_one_lwp (pid);
}
}
- while (pid == ptid_get_lwp (lp->ptid));
+ while (res == pid);
- gdb_assert (pid == -1 && errno == ECHILD);
+ gdb_assert (res == -1 && errno == ECHILD);
+}
+
+/* Callback for iterate_over_lwps. Kills LP; DATA is unused.
+ Returns 0 so that iteration continues to the next LWP. */
+
+static int
+kill_callback (struct lwp_info *lp, void *data)
+{
+ kill_one_lwp (ptid_get_lwp (lp->ptid));
return 0;
}
+/* Callback for iterate_over_lwps. Waits for LP to die; DATA is
+ unused. Returns 0 so that iteration continues to the next LWP. */
+
+static int
+kill_wait_callback (struct lwp_info *lp, void *data)
+{
+ kill_wait_one_lwp (ptid_get_lwp (lp->ptid));
+ return 0;
+}
+
+
+/* Kill the fork children of any threads of inferior INF that are
+ stopped at a fork event. */
+
static void
-linux_nat_kill (struct target_ops *ops)
+kill_unfollowed_fork_children (struct inferior *inf)
{
- struct target_waitstatus last;
- ptid_t last_ptid;
- int status;
+ struct thread_info *thread;
+ ALL_NON_EXITED_THREADS (thread)
+ if (thread->inf == inf)
+ {
+ struct target_waitstatus *ws = &thread->pending_follow;
+
+ if (ws->kind == TARGET_WAITKIND_FORKED
+ || ws->kind == TARGET_WAITKIND_VFORKED)
+ {
+ ptid_t child_ptid = ws->value.related_pid;
+ int child_pid = ptid_get_pid (child_ptid);
+ int child_lwp = ptid_get_lwp (child_ptid);
+
+ kill_one_lwp (child_lwp);
+ kill_wait_one_lwp (child_lwp);
+
+ /* Let the arch-specific native code know this process is
+ gone. */
+ linux_nat_forget_process (child_pid);
+ }
+ }
+}
+
+static void
+linux_nat_kill (struct target_ops *ops)
+{
/* If we're stopped while forking and we haven't followed yet,
kill the other task. We need to do this first because the
parent will be sleeping if this is a vfork. */
-
- get_last_target_status (&last_ptid, &last);
-
- if (last.kind == TARGET_WAITKIND_FORKED
- || last.kind == TARGET_WAITKIND_VFORKED)
- {
- ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
- wait (&status);
-
- /* Let the arch-specific native code know this process is
- gone. */
- linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
- }
+ kill_unfollowed_fork_children (current_inferior ());
if (forks_exist_p ())
linux_fork_killall ();
iterate_over_lwps (ptid, kill_wait_callback, NULL);
}
- target_mourn_inferior ();
+ target_mourn_inferior (inferior_ptid);
}
static void
const gdb_byte *writebuf,
ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
{
- struct cleanup *old_chain;
enum target_xfer_status xfer;
if (object == TARGET_OBJECT_SIGNAL_INFO)
if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
return TARGET_XFER_EOF;
- old_chain = save_inferior_ptid ();
-
- if (ptid_lwp_p (inferior_ptid))
- inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
-
xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
offset, len, xfered_len);
- do_cleanups (old_chain);
return xfer;
}
/* Update the processor core that each lwp/thread was last seen
running on. */
ALL_LWPS (lwp)
- lwp->core = linux_common_core_of_thread (lwp->ptid);
+ {
+ /* Avoid accessing /proc if the thread hasn't run since the last
+ time we fetched the thread's core. Accessing /proc becomes
+ noticeably expensive when we have thousands of LWPs. */
+ if (lwp->core == -1)
+ lwp->core = linux_common_core_of_thread (lwp->ptid);
+ }
}
-static char *
+static const char *
linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
{
static char buf[64];
return linux_proc_pid_to_exec_file (pid);
}
-/* Implement the to_xfer_partial interface for memory reads using the /proc
- filesystem. Because we can use a single read() call for /proc, this
- can be much more efficient than banging away at PTRACE_PEEKTEXT,
- but it doesn't support writes. */
+/* Implement the to_xfer_partial target method using /proc/<pid>/mem.
+ Because we can use a single read/write call, this can be much more
+ efficient than banging away at PTRACE_PEEKTEXT. */
static enum target_xfer_status
linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
int fd;
char filename[64];
- if (object != TARGET_OBJECT_MEMORY || !readbuf)
+ if (object != TARGET_OBJECT_MEMORY)
return TARGET_XFER_EOF;
/* Don't bother for one word. */
/* We could keep this file open and cache it - possibly one per
thread. That requires some juggling, but is even faster. */
- xsnprintf (filename, sizeof filename, "/proc/%d/mem",
- ptid_get_pid (inferior_ptid));
- fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
+ xsnprintf (filename, sizeof filename, "/proc/%ld/mem",
+ ptid_get_lwp (inferior_ptid));
+ fd = gdb_open_cloexec (filename, ((readbuf ? O_RDONLY : O_WRONLY)
+ | O_LARGEFILE), 0);
if (fd == -1)
return TARGET_XFER_EOF;
- /* If pread64 is available, use it. It's faster if the kernel
- supports it (only one syscall), and it's 64-bit safe even on
- 32-bit platforms (for instance, SPARC debugging a SPARC64
- application). */
+ /* Use pread64/pwrite64 if available, since they save a syscall and can
+ handle 64-bit offsets even on 32-bit platforms (for instance, SPARC
+ debugging a SPARC64 application). */
#ifdef HAVE_PREAD64
- if (pread64 (fd, readbuf, len, offset) != len)
+ ret = (readbuf ? pread64 (fd, readbuf, len, offset)
+ : pwrite64 (fd, writebuf, len, offset));
#else
- if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
+ ret = lseek (fd, offset, SEEK_SET);
+ if (ret != -1)
+ ret = (readbuf ? read (fd, readbuf, len)
+ : write (fd, writebuf, len));
#endif
- ret = 0;
- else
- ret = len;
close (fd);
- if (ret == 0)
+ if (ret == -1 || ret == 0)
return TARGET_XFER_EOF;
else
{
char buf[128];
int fd = 0;
int ret = -1;
- int pid = ptid_get_pid (inferior_ptid);
+ int pid = ptid_get_lwp (inferior_ptid);
if (!annex)
{
linux_proc_pending_signals (int pid, sigset_t *pending,
sigset_t *blocked, sigset_t *ignored)
{
- FILE *procfile;
char buffer[PATH_MAX], fname[PATH_MAX];
- struct cleanup *cleanup;
sigemptyset (pending);
sigemptyset (blocked);
sigemptyset (ignored);
xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
- procfile = gdb_fopen_cloexec (fname, "r");
+ gdb_file_up procfile = gdb_fopen_cloexec (fname, "r");
if (procfile == NULL)
error (_("Could not open %s"), fname);
- cleanup = make_cleanup_fclose (procfile);
- while (fgets (buffer, PATH_MAX, procfile) != NULL)
+ while (fgets (buffer, PATH_MAX, procfile.get ()) != NULL)
{
/* Normal queued signals are on the SigPnd line in the status
file. However, 2.6 kernels also have a "shared" pending
else if (startswith (buffer, "SigIgn:\t"))
add_line_to_sigset (buffer + 8, ignored);
}
-
- do_cleanups (cleanup);
}
static enum target_xfer_status
gdb_assert (arg != NULL);
/* Unpause all */
- target_resume (*ptid, 0, GDB_SIGNAL_0);
+ target_continue_no_signal (*ptid);
}
static VEC(static_tracepoint_marker_p) *
int pid = ptid_get_pid (inferior_ptid);
VEC(static_tracepoint_marker_p) *markers = NULL;
struct static_tracepoint_marker *marker = NULL;
- char *p = s;
+ const char *p = s;
ptid_t ptid = ptid_build (pid, 0, 0);
/* Pause all */
static int
linux_nat_can_async_p (struct target_ops *ops)
{
- /* NOTE: palves 2008-03-21: We're only async when the user requests
- it explicitly with the "set target-async" command.
- Someday, linux will always be async. */
+ /* We're always async, unless the user explicitly prevented it with the
+ "maint set target-async" command. */
return target_async_permitted;
}
#endif
}
-static int async_terminal_is_ours = 1;
-
-/* target_terminal_inferior implementation.
-
- This is a wrapper around child_terminal_inferior to add async support. */
-
-static void
-linux_nat_terminal_inferior (struct target_ops *self)
-{
- child_terminal_inferior (self);
-
- /* Calls to target_terminal_*() are meant to be idempotent. */
- if (!async_terminal_is_ours)
- return;
-
- delete_file_handler (input_fd);
- async_terminal_is_ours = 0;
- set_sigint_trap ();
-}
-
-/* target_terminal_ours implementation.
-
- This is a wrapper around child_terminal_ours to add async support (and
- implement the target_terminal_ours vs target_terminal_ours_for_output
- distinction). child_terminal_ours is currently no different than
- child_terminal_ours_for_output.
- We leave target_terminal_ours_for_output alone, leaving it to
- child_terminal_ours_for_output. */
-
-static void
-linux_nat_terminal_ours (struct target_ops *self)
-{
- /* GDB should never give the terminal to the inferior if the
- inferior is running in the background (run&, continue&, etc.),
- but claiming it sure should. */
- child_terminal_ours (self);
-
- if (async_terminal_is_ours)
- return;
-
- clear_sigint_trap ();
- add_file_handler (input_fd, stdin_event_handler, 0);
- async_terminal_is_ours = 1;
-}
-
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
so we notice when any child changes state, and notify the
event-loop; it allows us to use sigsuspend in linux_nat_wait_1
}
static void
-linux_nat_interrupt (struct target_ops *self, ptid_t ptid)
-{
- if (non_stop)
- iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
- else
- linux_ops->to_interrupt (linux_ops, ptid);
-}
-
-static void
linux_nat_close (struct target_ops *self)
{
/* Unregister from the event loop. */
return ret;
}
+/* Implementation of the to_thread_events method. */
+
+static void
+linux_nat_thread_events (struct target_ops *ops, int enable)
+{
+ report_thread_events = enable;
+}
+
void
linux_nat_add_target (struct target_ops *t)
{
t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
+ t->to_thread_events = linux_nat_thread_events;
t->to_can_async_p = linux_nat_can_async_p;
t->to_is_async_p = linux_nat_is_async_p;
t->to_supports_non_stop = linux_nat_supports_non_stop;
t->to_always_non_stop_p = linux_nat_always_non_stop_p;
t->to_async = linux_nat_async;
- t->to_terminal_inferior = linux_nat_terminal_inferior;
- t->to_terminal_ours = linux_nat_terminal_ours;
super_close = t->to_close;
t->to_close = linux_nat_close;
t->to_stop = linux_nat_stop;
- t->to_interrupt = linux_nat_interrupt;
t->to_supports_multi_process = linux_nat_supports_multi_process;
linux_nat_new_thread = new_thread;
}
+/* Register a method to call whenever a thread is destroyed. */
+void
+linux_nat_set_delete_thread (struct target_ops *t,
+ void (*delete_thread) (struct arch_lwp_info *))
+{
+ /* Save the pointer. We only support a single registered instance
+ of the GNU/Linux native target, so we do not need to map this to
+ T. */
+ linux_nat_delete_thread = delete_thread;
+}
+
/* See declaration in linux-nat.h. */
void
return inferior_ptid;
}
-/* Provide a prototype to silence -Wmissing-prototypes. */
-extern initialize_file_ftype _initialize_linux_nat;
-
void
_initialize_linux_nat (void)
{
sigdelset (&suspend_mask, SIGCHLD);
sigemptyset (&blocked_mask);
+
+ lwp_lwpid_htab_create ();
}
\f