1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
48 #include "target-debug.h"
50 static void target_info (char *, int);
52 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
54 static void default_terminal_info (struct target_ops *, const char *, int);
56 static int default_watchpoint_addr_within_range (struct target_ops *,
57 CORE_ADDR, CORE_ADDR, int);
59 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
62 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
64 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
67 static int default_follow_fork (struct target_ops *self, int follow_child,
70 static void default_mourn_inferior (struct target_ops *self);
72 static int default_search_memory (struct target_ops *ops,
74 ULONGEST search_space_len,
75 const gdb_byte *pattern,
77 CORE_ADDR *found_addrp);
79 static int default_verify_memory (struct target_ops *self,
81 CORE_ADDR memaddr, ULONGEST size);
83 static struct address_space *default_thread_address_space
84 (struct target_ops *self, ptid_t ptid);
86 static void tcomplain (void) ATTRIBUTE_NORETURN;
88 static int return_zero (struct target_ops *);
90 static int return_zero_has_execution (struct target_ops *, ptid_t);
92 static void target_command (char *, int);
94 static struct target_ops *find_default_run_target (char *);
96 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
99 static int dummy_find_memory_regions (struct target_ops *self,
100 find_memory_region_ftype ignore1,
103 static char *dummy_make_corefile_notes (struct target_ops *self,
104 bfd *ignore1, int *ignore2);
106 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
108 static enum exec_direction_kind default_execution_direction
109 (struct target_ops *self);
111 static CORE_ADDR default_target_decr_pc_after_break (struct target_ops *ops,
112 struct gdbarch *gdbarch);
114 static struct target_ops debug_target;
116 #include "target-delegates.c"
118 static void init_dummy_target (void);
120 static void update_current_target (void);
122 /* Pointer to array of target architecture structures; the size of the
123 array; the current index into the array; the allocated size of the
125 struct target_ops **target_structs;
126 unsigned target_struct_size;
127 unsigned target_struct_allocsize;
128 #define DEFAULT_ALLOCSIZE 10
130 /* The initial current target, so that there is always a semi-valid
133 static struct target_ops dummy_target;
135 /* Top of target stack. */
137 static struct target_ops *target_stack;
139 /* The target structure we are currently using to talk to a process
140 or file or whatever "inferior" we have. */
142 struct target_ops current_target;
144 /* Command list for target. */
146 static struct cmd_list_element *targetlist = NULL;
148 /* Nonzero if we should trust readonly sections from the
149 executable when reading memory. */
151 static int trust_readonly = 0;
153 /* Nonzero if we should show true memory content including
154 memory breakpoint inserted by gdb. */
156 static int show_memory_breakpoints = 0;
158 /* These globals control whether GDB attempts to perform these
159 operations; they are useful for targets that need to prevent
160 inadvertant disruption, such as in non-stop mode. */
162 int may_write_registers = 1;
164 int may_write_memory = 1;
166 int may_insert_breakpoints = 1;
168 int may_insert_tracepoints = 1;
170 int may_insert_fast_tracepoints = 1;
174 /* Non-zero if we want to see trace of target level stuff. */
176 static unsigned int targetdebug = 0;
179 set_targetdebug (char *args, int from_tty, struct cmd_list_element *c)
181 update_current_target ();
185 show_targetdebug (struct ui_file *file, int from_tty,
186 struct cmd_list_element *c, const char *value)
188 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
191 static void setup_target_debug (void);
193 /* The user just typed 'target' without the name of a target. */
196 target_command (char *arg, int from_tty)
198 fputs_filtered ("Argument required (target name). Try `help target'\n",
202 /* Default target_has_* methods for process_stratum targets. */
205 default_child_has_all_memory (struct target_ops *ops)
207 /* If no inferior selected, then we can't read memory here. */
208 if (ptid_equal (inferior_ptid, null_ptid))
215 default_child_has_memory (struct target_ops *ops)
217 /* If no inferior selected, then we can't read memory here. */
218 if (ptid_equal (inferior_ptid, null_ptid))
225 default_child_has_stack (struct target_ops *ops)
227 /* If no inferior selected, there's no stack. */
228 if (ptid_equal (inferior_ptid, null_ptid))
235 default_child_has_registers (struct target_ops *ops)
237 /* Can't read registers from no inferior. */
238 if (ptid_equal (inferior_ptid, null_ptid))
245 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
247 /* If there's no thread selected, then we can't make it run through
249 if (ptid_equal (the_ptid, null_ptid))
257 target_has_all_memory_1 (void)
259 struct target_ops *t;
261 for (t = current_target.beneath; t != NULL; t = t->beneath)
262 if (t->to_has_all_memory (t))
269 target_has_memory_1 (void)
271 struct target_ops *t;
273 for (t = current_target.beneath; t != NULL; t = t->beneath)
274 if (t->to_has_memory (t))
281 target_has_stack_1 (void)
283 struct target_ops *t;
285 for (t = current_target.beneath; t != NULL; t = t->beneath)
286 if (t->to_has_stack (t))
293 target_has_registers_1 (void)
295 struct target_ops *t;
297 for (t = current_target.beneath; t != NULL; t = t->beneath)
298 if (t->to_has_registers (t))
305 target_has_execution_1 (ptid_t the_ptid)
307 struct target_ops *t;
309 for (t = current_target.beneath; t != NULL; t = t->beneath)
310 if (t->to_has_execution (t, the_ptid))
317 target_has_execution_current (void)
319 return target_has_execution_1 (inferior_ptid);
322 /* Complete initialization of T. This ensures that various fields in
323 T are set, if needed by the target implementation. */
326 complete_target_initialization (struct target_ops *t)
328 /* Provide default values for all "must have" methods. */
330 if (t->to_has_all_memory == NULL)
331 t->to_has_all_memory = return_zero;
333 if (t->to_has_memory == NULL)
334 t->to_has_memory = return_zero;
336 if (t->to_has_stack == NULL)
337 t->to_has_stack = return_zero;
339 if (t->to_has_registers == NULL)
340 t->to_has_registers = return_zero;
342 if (t->to_has_execution == NULL)
343 t->to_has_execution = return_zero_has_execution;
345 /* These methods can be called on an unpushed target and so require
346 a default implementation if the target might plausibly be the
347 default run target. */
348 gdb_assert (t->to_can_run == NULL || (t->to_can_async_p != NULL
349 && t->to_supports_non_stop != NULL));
351 install_delegators (t);
354 /* This is used to implement the various target commands. */
357 open_target (char *args, int from_tty, struct cmd_list_element *command)
359 struct target_ops *ops = get_cmd_context (command);
362 fprintf_unfiltered (gdb_stdlog, "-> %s->to_open (...)\n",
365 ops->to_open (args, from_tty);
368 fprintf_unfiltered (gdb_stdlog, "<- %s->to_open (%s, %d)\n",
369 ops->to_shortname, args, from_tty);
372 /* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
377 add_target_with_completer (struct target_ops *t,
378 completer_ftype *completer)
380 struct cmd_list_element *c;
382 complete_target_initialization (t);
386 target_struct_allocsize = DEFAULT_ALLOCSIZE;
387 target_structs = (struct target_ops **) xmalloc
388 (target_struct_allocsize * sizeof (*target_structs));
390 if (target_struct_size >= target_struct_allocsize)
392 target_struct_allocsize *= 2;
393 target_structs = (struct target_ops **)
394 xrealloc ((char *) target_structs,
395 target_struct_allocsize * sizeof (*target_structs));
397 target_structs[target_struct_size++] = t;
399 if (targetlist == NULL)
400 add_prefix_cmd ("target", class_run, target_command, _("\
401 Connect to a target machine or process.\n\
402 The first argument is the type or protocol of the target machine.\n\
403 Remaining arguments are interpreted by the target protocol. For more\n\
404 information on the arguments for a particular protocol, type\n\
405 `help target ' followed by the protocol name."),
406 &targetlist, "target ", 0, &cmdlist);
407 c = add_cmd (t->to_shortname, no_class, NULL, t->to_doc, &targetlist);
408 set_cmd_sfunc (c, open_target);
409 set_cmd_context (c, t);
410 if (completer != NULL)
411 set_cmd_completer (c, completer);
414 /* Add a possible target architecture to the list. */
417 add_target (struct target_ops *t)
419 add_target_with_completer (t, NULL);
425 add_deprecated_target_alias (struct target_ops *t, char *alias)
427 struct cmd_list_element *c;
430 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
432 c = add_cmd (alias, no_class, NULL, t->to_doc, &targetlist);
433 set_cmd_sfunc (c, open_target);
434 set_cmd_context (c, t);
435 alt = xstrprintf ("target %s", t->to_shortname);
436 deprecate_cmd (c, alt);
444 current_target.to_kill (¤t_target);
448 target_load (const char *arg, int from_tty)
450 target_dcache_invalidate ();
451 (*current_target.to_load) (¤t_target, arg, from_tty);
455 target_terminal_inferior (void)
457 /* A background resume (``run&'') should leave GDB in control of the
458 terminal. Use target_can_async_p, not target_is_async_p, since at
459 this point the target is not async yet. However, if sync_execution
460 is not set, we know it will become async prior to resume. */
461 if (target_can_async_p () && !sync_execution)
464 /* If GDB is resuming the inferior in the foreground, install
465 inferior's terminal modes. */
466 (*current_target.to_terminal_inferior) (¤t_target);
472 target_supports_terminal_ours (void)
474 struct target_ops *t;
476 for (t = current_target.beneath; t != NULL; t = t->beneath)
478 if (t->to_terminal_ours != delegate_terminal_ours
479 && t->to_terminal_ours != tdefault_terminal_ours)
489 error (_("You can't do that when your target is `%s'"),
490 current_target.to_shortname);
496 error (_("You can't do that without a process to debug."));
500 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
502 printf_unfiltered (_("No saved terminal information.\n"));
505 /* A default implementation for the to_get_ada_task_ptid target method.
507 This function builds the PTID by using both LWP and TID as part of
508 the PTID lwp and tid elements. The pid used is the pid of the
512 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
514 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
517 static enum exec_direction_kind
518 default_execution_direction (struct target_ops *self)
520 if (!target_can_execute_reverse)
522 else if (!target_can_async_p ())
525 gdb_assert_not_reached ("\
526 to_execution_direction must be implemented for reverse async");
529 /* Go through the target stack from top to bottom, copying over zero
530 entries in current_target, then filling in still empty entries. In
531 effect, we are doing class inheritance through the pushed target
534 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
535 is currently implemented, is that it discards any knowledge of
536 which target an inherited method originally belonged to.
537 Consequently, new new target methods should instead explicitly and
538 locally search the target stack for the target that can handle the
542 update_current_target (void)
544 struct target_ops *t;
546 /* First, reset current's contents. */
547 memset (¤t_target, 0, sizeof (current_target));
549 /* Install the delegators. */
550 install_delegators (¤t_target);
552 current_target.to_stratum = target_stack->to_stratum;
554 #define INHERIT(FIELD, TARGET) \
555 if (!current_target.FIELD) \
556 current_target.FIELD = (TARGET)->FIELD
558 /* Do not add any new INHERITs here. Instead, use the delegation
559 mechanism provided by make-target-delegates. */
560 for (t = target_stack; t; t = t->beneath)
562 INHERIT (to_shortname, t);
563 INHERIT (to_longname, t);
564 INHERIT (to_attach_no_wait, t);
565 INHERIT (to_have_steppable_watchpoint, t);
566 INHERIT (to_have_continuable_watchpoint, t);
567 INHERIT (to_has_thread_control, t);
571 /* Finally, position the target-stack beneath the squashed
572 "current_target". That way code looking for a non-inherited
573 target method can quickly and simply find it. */
574 current_target.beneath = target_stack;
577 setup_target_debug ();
580 /* Push a new target type into the stack of the existing target accessors,
581 possibly superseding some of the existing accessors.
583 Rather than allow an empty stack, we always have the dummy target at
584 the bottom stratum, so we can call the function vectors without
588 push_target (struct target_ops *t)
590 struct target_ops **cur;
592 /* Check magic number. If wrong, it probably means someone changed
593 the struct definition, but not all the places that initialize one. */
594 if (t->to_magic != OPS_MAGIC)
596 fprintf_unfiltered (gdb_stderr,
597 "Magic number of %s target struct wrong\n",
599 internal_error (__FILE__, __LINE__,
600 _("failed internal consistency check"));
603 /* Find the proper stratum to install this target in. */
604 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
606 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
610 /* If there's already targets at this stratum, remove them. */
611 /* FIXME: cagney/2003-10-15: I think this should be popping all
612 targets to CUR, and not just those at this stratum level. */
613 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
615 /* There's already something at this stratum level. Close it,
616 and un-hook it from the stack. */
617 struct target_ops *tmp = (*cur);
619 (*cur) = (*cur)->beneath;
624 /* We have removed all targets in our stratum, now add the new one. */
628 update_current_target ();
631 /* Remove a target_ops vector from the stack, wherever it may be.
632 Return how many times it was removed (0 or 1). */
635 unpush_target (struct target_ops *t)
637 struct target_ops **cur;
638 struct target_ops *tmp;
640 if (t->to_stratum == dummy_stratum)
641 internal_error (__FILE__, __LINE__,
642 _("Attempt to unpush the dummy target"));
644 /* Look for the specified target. Note that we assume that a target
645 can only occur once in the target stack. */
647 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
653 /* If we don't find target_ops, quit. Only open targets should be
658 /* Unchain the target. */
660 (*cur) = (*cur)->beneath;
663 update_current_target ();
665 /* Finally close the target. Note we do this after unchaining, so
666 any target method calls from within the target_close
667 implementation don't end up in T anymore. */
674 pop_all_targets_above (enum strata above_stratum)
676 while ((int) (current_target.to_stratum) > (int) above_stratum)
678 if (!unpush_target (target_stack))
680 fprintf_unfiltered (gdb_stderr,
681 "pop_all_targets couldn't find target %s\n",
682 target_stack->to_shortname);
683 internal_error (__FILE__, __LINE__,
684 _("failed internal consistency check"));
691 pop_all_targets (void)
693 pop_all_targets_above (dummy_stratum);
696 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
699 target_is_pushed (struct target_ops *t)
701 struct target_ops *cur;
703 /* Check magic number. If wrong, it probably means someone changed
704 the struct definition, but not all the places that initialize one. */
705 if (t->to_magic != OPS_MAGIC)
707 fprintf_unfiltered (gdb_stderr,
708 "Magic number of %s target struct wrong\n",
710 internal_error (__FILE__, __LINE__,
711 _("failed internal consistency check"));
714 for (cur = target_stack; cur != NULL; cur = cur->beneath)
721 /* Default implementation of to_get_thread_local_address. */
724 generic_tls_error (void)
726 throw_error (TLS_GENERIC_ERROR,
727 _("Cannot find thread-local variables on this target"));
730 /* Using the objfile specified in OBJFILE, find the address for the
731 current thread's thread-local storage with offset OFFSET. */
733 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
735 volatile CORE_ADDR addr = 0;
736 struct target_ops *target = ¤t_target;
738 if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
740 ptid_t ptid = inferior_ptid;
741 volatile struct gdb_exception ex;
743 TRY_CATCH (ex, RETURN_MASK_ALL)
747 /* Fetch the load module address for this objfile. */
748 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
751 addr = target->to_get_thread_local_address (target, ptid,
754 /* If an error occurred, print TLS related messages here. Otherwise,
755 throw the error to some higher catcher. */
758 int objfile_is_library = (objfile->flags & OBJF_SHARED);
762 case TLS_NO_LIBRARY_SUPPORT_ERROR:
763 error (_("Cannot find thread-local variables "
764 "in this thread library."));
766 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
767 if (objfile_is_library)
768 error (_("Cannot find shared library `%s' in dynamic"
769 " linker's load module list"), objfile_name (objfile));
771 error (_("Cannot find executable file `%s' in dynamic"
772 " linker's load module list"), objfile_name (objfile));
774 case TLS_NOT_ALLOCATED_YET_ERROR:
775 if (objfile_is_library)
776 error (_("The inferior has not yet allocated storage for"
777 " thread-local variables in\n"
778 "the shared library `%s'\n"
780 objfile_name (objfile), target_pid_to_str (ptid));
782 error (_("The inferior has not yet allocated storage for"
783 " thread-local variables in\n"
784 "the executable `%s'\n"
786 objfile_name (objfile), target_pid_to_str (ptid));
788 case TLS_GENERIC_ERROR:
789 if (objfile_is_library)
790 error (_("Cannot find thread-local storage for %s, "
791 "shared library %s:\n%s"),
792 target_pid_to_str (ptid),
793 objfile_name (objfile), ex.message);
795 error (_("Cannot find thread-local storage for %s, "
796 "executable file %s:\n%s"),
797 target_pid_to_str (ptid),
798 objfile_name (objfile), ex.message);
801 throw_exception (ex);
806 /* It wouldn't be wrong here to try a gdbarch method, too; finding
807 TLS is an ABI-specific thing. But we don't do that yet. */
809 error (_("Cannot find thread-local variables on this target"));
815 target_xfer_status_to_string (enum target_xfer_status status)
817 #define CASE(X) case X: return #X
820 CASE(TARGET_XFER_E_IO);
821 CASE(TARGET_XFER_UNAVAILABLE);
830 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
832 /* target_read_string -- read a null terminated string, up to LEN bytes,
833 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
834 Set *STRING to a pointer to malloc'd memory containing the data; the caller
835 is responsible for freeing it. Return the number of bytes successfully
839 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
845 int buffer_allocated;
847 unsigned int nbytes_read = 0;
851 /* Small for testing. */
852 buffer_allocated = 4;
853 buffer = xmalloc (buffer_allocated);
858 tlen = MIN (len, 4 - (memaddr & 3));
859 offset = memaddr & 3;
861 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
864 /* The transfer request might have crossed the boundary to an
865 unallocated region of memory. Retry the transfer, requesting
869 errcode = target_read_memory (memaddr, buf, 1);
874 if (bufptr - buffer + tlen > buffer_allocated)
878 bytes = bufptr - buffer;
879 buffer_allocated *= 2;
880 buffer = xrealloc (buffer, buffer_allocated);
881 bufptr = buffer + bytes;
884 for (i = 0; i < tlen; i++)
886 *bufptr++ = buf[i + offset];
887 if (buf[i + offset] == '\000')
889 nbytes_read += i + 1;
905 struct target_section_table *
906 target_get_section_table (struct target_ops *target)
908 return (*target->to_get_section_table) (target);
911 /* Find a section containing ADDR. */
913 struct target_section *
914 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
916 struct target_section_table *table = target_get_section_table (target);
917 struct target_section *secp;
922 for (secp = table->sections; secp < table->sections_end; secp++)
924 if (addr >= secp->addr && addr < secp->endaddr)
930 /* Read memory from more than one valid target. A core file, for
931 instance, could have some of memory but delegate other bits to
932 the target below it. So, we must manually try all targets. */
934 static enum target_xfer_status
935 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
936 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
937 ULONGEST *xfered_len)
939 enum target_xfer_status res;
943 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
944 readbuf, writebuf, memaddr, len,
946 if (res == TARGET_XFER_OK)
949 /* Stop if the target reports that the memory is not available. */
950 if (res == TARGET_XFER_UNAVAILABLE)
953 /* We want to continue past core files to executables, but not
954 past a running target's memory. */
955 if (ops->to_has_all_memory (ops))
962 /* The cache works at the raw memory level. Make sure the cache
963 gets updated with raw contents no matter what kind of memory
964 object was originally being written. Note we do write-through
965 first, so that if it fails, we don't write to the cache contents
966 that never made it to the target. */
968 && !ptid_equal (inferior_ptid, null_ptid)
969 && target_dcache_init_p ()
970 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
972 DCACHE *dcache = target_dcache_get ();
974 /* Note that writing to an area of memory which wasn't present
975 in the cache doesn't cause it to be loaded in. */
976 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
982 /* Perform a partial memory transfer.
983 For docs see target.h, to_xfer_partial. */
985 static enum target_xfer_status
986 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
987 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
988 ULONGEST len, ULONGEST *xfered_len)
990 enum target_xfer_status res;
992 struct mem_region *region;
993 struct inferior *inf;
995 /* For accesses to unmapped overlay sections, read directly from
996 files. Must do this first, as MEMADDR may need adjustment. */
997 if (readbuf != NULL && overlay_debugging)
999 struct obj_section *section = find_pc_overlay (memaddr);
1001 if (pc_in_unmapped_range (memaddr, section))
1003 struct target_section_table *table
1004 = target_get_section_table (ops);
1005 const char *section_name = section->the_bfd_section->name;
1007 memaddr = overlay_mapped_address (memaddr, section);
1008 return section_table_xfer_memory_partial (readbuf, writebuf,
1009 memaddr, len, xfered_len,
1011 table->sections_end,
1016 /* Try the executable files, if "trust-readonly-sections" is set. */
1017 if (readbuf != NULL && trust_readonly)
1019 struct target_section *secp;
1020 struct target_section_table *table;
1022 secp = target_section_by_addr (ops, memaddr);
1024 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1025 secp->the_bfd_section)
1028 table = target_get_section_table (ops);
1029 return section_table_xfer_memory_partial (readbuf, writebuf,
1030 memaddr, len, xfered_len,
1032 table->sections_end,
1037 /* Try GDB's internal data cache. */
1038 region = lookup_mem_region (memaddr);
1039 /* region->hi == 0 means there's no upper bound. */
1040 if (memaddr + len < region->hi || region->hi == 0)
1043 reg_len = region->hi - memaddr;
1045 switch (region->attrib.mode)
1048 if (writebuf != NULL)
1049 return TARGET_XFER_E_IO;
1053 if (readbuf != NULL)
1054 return TARGET_XFER_E_IO;
1058 /* We only support writing to flash during "load" for now. */
1059 if (writebuf != NULL)
1060 error (_("Writing to flash memory forbidden in this context"));
1064 return TARGET_XFER_E_IO;
1067 if (!ptid_equal (inferior_ptid, null_ptid))
1068 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1074 /* The dcache reads whole cache lines; that doesn't play well
1075 with reading from a trace buffer, because reading outside of
1076 the collected memory range fails. */
1077 && get_traceframe_number () == -1
1078 && (region->attrib.cache
1079 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1080 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1082 DCACHE *dcache = target_dcache_get_or_init ();
1084 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1085 reg_len, xfered_len);
1088 /* If none of those methods found the memory we wanted, fall back
1089 to a target partial transfer. Normally a single call to
1090 to_xfer_partial is enough; if it doesn't recognize an object
1091 it will call the to_xfer_partial of the next target down.
1092 But for memory this won't do. Memory is the only target
1093 object which can be read from more than one valid target.
1094 A core file, for instance, could have some of memory but
1095 delegate other bits to the target below it. So, we must
1096 manually try all targets. */
1098 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1101 /* If we still haven't got anything, return the last error. We
1106 /* Perform a partial memory transfer. For docs see target.h,
1109 static enum target_xfer_status
1110 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1111 gdb_byte *readbuf, const gdb_byte *writebuf,
1112 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1114 enum target_xfer_status res;
1116 /* Zero length requests are ok and require no work. */
1118 return TARGET_XFER_EOF;
1120 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1121 breakpoint insns, thus hiding out from higher layers whether
1122 there are software breakpoints inserted in the code stream. */
1123 if (readbuf != NULL)
1125 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1128 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1129 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
1134 struct cleanup *old_chain;
1136 /* A large write request is likely to be partially satisfied
1137 by memory_xfer_partial_1. We will continually malloc
1138 and free a copy of the entire write request for breakpoint
1139 shadow handling even though we only end up writing a small
1140 subset of it. Cap writes to 4KB to mitigate this. */
1141 len = min (4096, len);
1143 buf = xmalloc (len);
1144 old_chain = make_cleanup (xfree, buf);
1145 memcpy (buf, writebuf, len);
1147 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1148 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1151 do_cleanups (old_chain);
1158 restore_show_memory_breakpoints (void *arg)
1160 show_memory_breakpoints = (uintptr_t) arg;
1164 make_show_memory_breakpoints_cleanup (int show)
1166 int current = show_memory_breakpoints;
1168 show_memory_breakpoints = show;
1169 return make_cleanup (restore_show_memory_breakpoints,
1170 (void *) (uintptr_t) current);
1173 /* For docs see target.h, to_xfer_partial. */
1175 enum target_xfer_status
1176 target_xfer_partial (struct target_ops *ops,
1177 enum target_object object, const char *annex,
1178 gdb_byte *readbuf, const gdb_byte *writebuf,
1179 ULONGEST offset, ULONGEST len,
1180 ULONGEST *xfered_len)
1182 enum target_xfer_status retval;
1184 gdb_assert (ops->to_xfer_partial != NULL);
1186 /* Transfer is done when LEN is zero. */
1188 return TARGET_XFER_EOF;
1190 if (writebuf && !may_write_memory)
1191 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1192 core_addr_to_string_nz (offset), plongest (len));
1196 /* If this is a memory transfer, let the memory-specific code
1197 have a look at it instead. Memory transfers are more
1199 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1200 || object == TARGET_OBJECT_CODE_MEMORY)
1201 retval = memory_xfer_partial (ops, object, readbuf,
1202 writebuf, offset, len, xfered_len);
1203 else if (object == TARGET_OBJECT_RAW_MEMORY)
1205 /* Request the normal memory object from other layers. */
1206 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1210 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1211 writebuf, offset, len, xfered_len);
1215 const unsigned char *myaddr = NULL;
1217 fprintf_unfiltered (gdb_stdlog,
1218 "%s:target_xfer_partial "
1219 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1222 (annex ? annex : "(null)"),
1223 host_address_to_string (readbuf),
1224 host_address_to_string (writebuf),
1225 core_addr_to_string_nz (offset),
1226 pulongest (len), retval,
1227 pulongest (*xfered_len));
1233 if (retval == TARGET_XFER_OK && myaddr != NULL)
1237 fputs_unfiltered (", bytes =", gdb_stdlog);
1238 for (i = 0; i < *xfered_len; i++)
1240 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1242 if (targetdebug < 2 && i > 0)
1244 fprintf_unfiltered (gdb_stdlog, " ...");
1247 fprintf_unfiltered (gdb_stdlog, "\n");
1250 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1254 fputc_unfiltered ('\n', gdb_stdlog);
1257 /* Check implementations of to_xfer_partial update *XFERED_LEN
1258 properly. Do assertion after printing debug messages, so that we
1259 can find more clues on assertion failure from debugging messages. */
1260 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1261 gdb_assert (*xfered_len > 0);
1266 /* Read LEN bytes of target memory at address MEMADDR, placing the
1267 results in GDB's memory at MYADDR. Returns either 0 for success or
1268 TARGET_XFER_E_IO if any error occurs.
1270 If an error occurs, no guarantee is made about the contents of the data at
1271 MYADDR. In particular, the caller should not depend upon partial reads
1272 filling the buffer with good data. There is no way for the caller to know
1273 how much good data might have been transfered anyway. Callers that can
1274 deal with partial reads should call target_read (which will retry until
1275 it makes no progress, and then return how much was transferred). */
1278 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1280 /* Dispatch to the topmost target, not the flattened current_target.
1281 Memory accesses check target->to_has_(all_)memory, and the
1282 flattened target doesn't inherit those. */
1283 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1284 myaddr, memaddr, len) == len)
1287 return TARGET_XFER_E_IO;
1290 /* Like target_read_memory, but specify explicitly that this is a read
1291 from the target's raw memory. That is, this read bypasses the
1292 dcache, breakpoint shadowing, etc. */
1295 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1297 /* See comment in target_read_memory about why the request starts at
1298 current_target.beneath. */
1299 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1300 myaddr, memaddr, len) == len)
1303 return TARGET_XFER_E_IO;
1306 /* Like target_read_memory, but specify explicitly that this is a read from
1307 the target's stack. This may trigger different cache behavior. */
1310 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1312 /* See comment in target_read_memory about why the request starts at
1313 current_target.beneath. */
1314 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1315 myaddr, memaddr, len) == len)
1318 return TARGET_XFER_E_IO;
1321 /* Like target_read_memory, but specify explicitly that this is a read from
1322 the target's code. This may trigger different cache behavior. */
1325 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1327 /* See comment in target_read_memory about why the request starts at
1328 current_target.beneath. */
1329 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1330 myaddr, memaddr, len) == len)
1333 return TARGET_XFER_E_IO;
1336 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1337 Returns either 0 for success or TARGET_XFER_E_IO if any
1338 error occurs. If an error occurs, no guarantee is made about how
1339 much data got written. Callers that can deal with partial writes
1340 should call target_write. */
1343 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1345 /* See comment in target_read_memory about why the request starts at
1346 current_target.beneath. */
1347 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1348 myaddr, memaddr, len) == len)
1351 return TARGET_XFER_E_IO;
1354 /* Write LEN bytes from MYADDR to target raw memory at address
1355 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1356 if any error occurs. If an error occurs, no guarantee is made
1357 about how much data got written. Callers that can deal with
1358 partial writes should call target_write. */
1361 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1363 /* See comment in target_read_memory about why the request starts at
1364 current_target.beneath. */
1365 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1366 myaddr, memaddr, len) == len)
1369 return TARGET_XFER_E_IO;
1372 /* Fetch the target's memory map. */
1375 target_memory_map (void)
1377 VEC(mem_region_s) *result;
1378 struct mem_region *last_one, *this_one;
1380 struct target_ops *t;
1382 result = current_target.to_memory_map (¤t_target);
1386 qsort (VEC_address (mem_region_s, result),
1387 VEC_length (mem_region_s, result),
1388 sizeof (struct mem_region), mem_region_cmp);
1390 /* Check that regions do not overlap. Simultaneously assign
1391 a numbering for the "mem" commands to use to refer to
1394 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1396 this_one->number = ix;
1398 if (last_one && last_one->hi > this_one->lo)
1400 warning (_("Overlapping regions in memory map: ignoring"));
1401 VEC_free (mem_region_s, result);
1404 last_one = this_one;
1411 target_flash_erase (ULONGEST address, LONGEST length)
1413 current_target.to_flash_erase (¤t_target, address, length);
1417 target_flash_done (void)
1419 current_target.to_flash_done (¤t_target);
/* "show" callback for the trust-readonly-sections setting.  */

static void
show_trust_readonly (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Mode for reading from readonly sections is %s.\n"),
		    value);
}
1431 /* Target vector read/write partial wrapper functions. */
1433 static enum target_xfer_status
1434 target_read_partial (struct target_ops *ops,
1435 enum target_object object,
1436 const char *annex, gdb_byte *buf,
1437 ULONGEST offset, ULONGEST len,
1438 ULONGEST *xfered_len)
1440 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1444 static enum target_xfer_status
1445 target_write_partial (struct target_ops *ops,
1446 enum target_object object,
1447 const char *annex, const gdb_byte *buf,
1448 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1450 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1454 /* Wrappers to perform the full transfer. */
1456 /* For docs on target_read see target.h. */
1459 target_read (struct target_ops *ops,
1460 enum target_object object,
1461 const char *annex, gdb_byte *buf,
1462 ULONGEST offset, LONGEST len)
1466 while (xfered < len)
1468 ULONGEST xfered_len;
1469 enum target_xfer_status status;
1471 status = target_read_partial (ops, object, annex,
1472 (gdb_byte *) buf + xfered,
1473 offset + xfered, len - xfered,
1476 /* Call an observer, notifying them of the xfer progress? */
1477 if (status == TARGET_XFER_EOF)
1479 else if (status == TARGET_XFER_OK)
1481 xfered += xfered_len;
1491 /* Assuming that the entire [begin, end) range of memory cannot be
1492 read, try to read whatever subrange is possible to read.
1494 The function returns, in RESULT, either zero or one memory block.
1495 If there's a readable subrange at the beginning, it is completely
1496 read and returned. Any further readable subrange will not be read.
1497 Otherwise, if there's a readable subrange at the end, it will be
1498 completely read and returned. Any readable subranges before it
1499 (obviously, not starting at the beginning), will be ignored. In
1500 other cases -- either no readable subrange, or readable subrange(s)
1501 that are neither at the beginning nor at the end, nothing is returned.
1503 The purpose of this function is to handle a read across a boundary
1504 of accessible memory in a case when memory map is not available.
1505 The above restrictions are fine for this case, but will give
1506 incorrect results if the memory is 'patchy'. However, supporting
1507 'patchy' memory would require trying to read every single byte,
1508 and that seems an unacceptable solution. Explicit memory map is
1509 recommended for this case -- and target_read_memory_robust will
1510 take care of reading multiple ranges then. */
1513 read_whatever_is_readable (struct target_ops *ops,
1514 ULONGEST begin, ULONGEST end,
1515 VEC(memory_read_result_s) **result)
1517 gdb_byte *buf = xmalloc (end - begin);
1518 ULONGEST current_begin = begin;
1519 ULONGEST current_end = end;
1521 memory_read_result_s r;
1522 ULONGEST xfered_len;
1524 /* If we previously failed to read 1 byte, nothing can be done here. */
1525 if (end - begin <= 1)
1531 /* Check that either first or the last byte is readable, and give up
1532 if not. This heuristic is meant to permit reading accessible memory
1533 at the boundary of accessible region. */
1534 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1535 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
1540 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1541 buf + (end-begin) - 1, end - 1, 1,
1542 &xfered_len) == TARGET_XFER_OK)
1553 /* Loop invariant is that the [current_begin, current_end) was previously
1554 found to be not readable as a whole.
1556 Note loop condition -- if the range has 1 byte, we can't divide the range
1557 so there's no point trying further. */
1558 while (current_end - current_begin > 1)
1560 ULONGEST first_half_begin, first_half_end;
1561 ULONGEST second_half_begin, second_half_end;
1563 ULONGEST middle = current_begin + (current_end - current_begin)/2;
/* Depending on the search direction chosen above, "first half" is
   the half that is tried first: lower half when scanning forward...  */
1567 first_half_begin = current_begin;
1568 first_half_end = middle;
1569 second_half_begin = middle;
1570 second_half_end = current_end;
/* ...and upper half when scanning backward from END.  */
1574 first_half_begin = middle;
1575 first_half_end = current_end;
1576 second_half_begin = current_begin;
1577 second_half_end = middle;
1580 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1581 buf + (first_half_begin - begin),
1583 first_half_end - first_half_begin);
1585 if (xfer == first_half_end - first_half_begin)
1587 /* This half reads up fine. So, the error must be in the
1589 current_begin = second_half_begin;
1590 current_end = second_half_end;
1594 /* This half is not readable. Because we've tried one byte, we
1595 know some part of this half is actually readable. Go to the next
1596 iteration to divide again and try to read.
1598 We don't handle the other half, because this function only tries
1599 to read a single readable subrange. */
1600 current_begin = first_half_begin;
1601 current_end = first_half_end;
1607 /* The [begin, current_begin) range has been read. */
1609 r.end = current_begin;
1614 /* The [current_end, end) range has been read. */
1615 LONGEST rlen = end - current_end;
/* Copy the readable tail out of BUF into a block the caller owns.  */
1617 r.data = xmalloc (rlen);
1618 memcpy (r.data, buf + current_end - begin, rlen);
1619 r.begin = current_end;
1623 VEC_safe_push(memory_read_result_s, (*result), &r);
1627 free_memory_read_result_vector (void *x)
1629 VEC(memory_read_result_s) *v = x;
1630 memory_read_result_s *current;
1633 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1635 xfree (current->data);
1637 VEC_free (memory_read_result_s, v);
1640 VEC(memory_read_result_s) *
1641 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
1643 VEC(memory_read_result_s) *result = 0;
1646 while (xfered < len)
1648 struct mem_region *region = lookup_mem_region (offset + xfered);
1651 /* If there is no explicit region, a fake one should be created. */
1652 gdb_assert (region);
1654 if (region->hi == 0)
/* An upper bound of 0 means the region extends to the end of the
   address space; clamp to the remaining request.  */
1655 rlen = len - xfered;
/* NOTE(review): this subtracts OFFSET, not OFFSET + XFERED, so after
   the first chunk RLEN looks stale relative to the current read
   position -- verify against upstream before relying on it.  */
1657 rlen = region->hi - offset;
1659 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1661 /* Cannot read this region. Note that we can end up here only
1662 if the region is explicitly marked inaccessible, or
1663 'inaccessible-by-default' is in effect. */
1668 LONGEST to_read = min (len - xfered, rlen);
1669 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
1671 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1672 (gdb_byte *) buffer,
1673 offset + xfered, to_read);
1674 /* Call an observer, notifying them of the xfer progress? */
1677 /* Got an error reading full chunk. See if maybe we can read
1680 read_whatever_is_readable (ops, offset + xfered,
1681 offset + xfered + to_read, &result);
/* Full chunk read OK: record it as one contiguous block.  */
1686 struct memory_read_result r;
1688 r.begin = offset + xfered;
1689 r.end = r.begin + xfer;
1690 VEC_safe_push (memory_read_result_s, result, &r);
1700 /* An alternative to target_write with progress callbacks. */
1703 target_write_with_progress (struct target_ops *ops,
1704 enum target_object object,
1705 const char *annex, const gdb_byte *buf,
1706 ULONGEST offset, LONGEST len,
1707 void (*progress) (ULONGEST, void *), void *baton)
1711 /* Give the progress callback a chance to set up. */
1713 (*progress) (0, baton);
1715 while (xfered < len)
1717 ULONGEST xfered_len;
1718 enum target_xfer_status status;
1720 status = target_write_partial (ops, object, annex,
1721 (gdb_byte *) buf + xfered,
1722 offset + xfered, len - xfered,
1725 if (status != TARGET_XFER_OK)
1726 return status == TARGET_XFER_EOF ? xfered : -1;
1729 (*progress) (xfered_len, baton);
1731 xfered += xfered_len;
1737 /* For docs on target_write see target.h. */
1740 target_write (struct target_ops *ops,
1741 enum target_object object,
1742 const char *annex, const gdb_byte *buf,
1743 ULONGEST offset, LONGEST len)
1745 return target_write_with_progress (ops, object, annex, buf, offset, len,
1749 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1750 the size of the transferred data. PADDING additional bytes are
1751 available in *BUF_P. This is a helper function for
1752 target_read_alloc; see the declaration of that function for more
1756 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1757 const char *annex, gdb_byte **buf_p, int padding)
1759 size_t buf_alloc, buf_pos;
1762 /* This function does not have a length parameter; it reads the
1763 entire OBJECT. Also, it doesn't support objects fetched partly
1764 from one target and partly from another (in a different stratum,
1765 e.g. a core file and an executable). Both reasons make it
1766 unsuitable for reading memory. */
1767 gdb_assert (object != TARGET_OBJECT_MEMORY);
1769 /* Start by reading up to 4K at a time. The target will throttle
1770 this number down if necessary. */
1772 buf = xmalloc (buf_alloc);
/* Read chunks into BUF until EOF or error, growing BUF as needed.  */
1776 ULONGEST xfered_len;
1777 enum target_xfer_status status;
/* Always leave PADDING bytes of headroom at the end of the buffer
   for the caller (e.g. a terminating NUL in target_read_stralloc).  */
1779 status = target_read_partial (ops, object, annex, &buf[buf_pos],
1780 buf_pos, buf_alloc - buf_pos - padding,
1783 if (status == TARGET_XFER_EOF)
1785 /* Read all there was. */
1792 else if (status != TARGET_XFER_OK)
1794 /* An error occurred. */
1796 return TARGET_XFER_E_IO;
1799 buf_pos += xfered_len;
1801 /* If the buffer is filling up, expand it. */
1802 if (buf_alloc < buf_pos * 2)
1805 buf = xrealloc (buf, buf_alloc);
1812 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1813 the size of the transferred data. See the declaration in "target.h"
1814 function for more information about the return value. */
1817 target_read_alloc (struct target_ops *ops, enum target_object object,
1818 const char *annex, gdb_byte **buf_p)
1820 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1823 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1824 returned as a string, allocated using xmalloc. If an error occurs
1825 or the transfer is unsupported, NULL is returned. Empty objects
1826 are returned as allocated but empty strings. A warning is issued
1827 if the result contains any embedded NUL bytes. */
1830 target_read_stralloc (struct target_ops *ops, enum target_object object,
1835 LONGEST i, transferred;
/* padding = 1 reserves one extra byte so the terminating NUL below
   always fits.  */
1837 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1838 bufstr = (char *) buffer;
1840 if (transferred < 0)
1843 if (transferred == 0)
1844 return xstrdup ("");
1846 bufstr[transferred] = 0;
1848 /* Check for embedded NUL bytes; but allow trailing NULs. */
1849 for (i = strlen (bufstr); i < transferred; i++)
1852 warning (_("target object %d, annex %s, "
1853 "contained unexpected null characters"),
1854 (int) object, annex ? annex : "(none)");
1861 /* Memory transfer methods. */
1864 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1867 /* This method is used to read from an alternate, non-current
1868 target. This read must bypass the overlay support (as symbols
1869 don't match this target), and GDB's internal cache (wrong cache
1870 for this target). */
1871 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1873 memory_error (TARGET_XFER_E_IO, addr);
1877 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
1878 int len, enum bfd_endian byte_order)
1880 gdb_byte buf[sizeof (ULONGEST)];
1882 gdb_assert (len <= sizeof (buf));
1883 get_target_memory (ops, addr, buf, len);
1884 return extract_unsigned_integer (buf, len, byte_order);
1890 target_insert_breakpoint (struct gdbarch *gdbarch,
1891 struct bp_target_info *bp_tgt)
1893 if (!may_insert_breakpoints)
1895 warning (_("May not insert breakpoints"));
1899 return current_target.to_insert_breakpoint (¤t_target,
1906 target_remove_breakpoint (struct gdbarch *gdbarch,
1907 struct bp_target_info *bp_tgt)
1909 /* This is kind of a weird case to handle, but the permission might
1910 have been changed after breakpoints were inserted - in which case
1911 we should just take the user literally and assume that any
1912 breakpoints should be left in place. */
1913 if (!may_insert_breakpoints)
1915 warning (_("May not remove breakpoints"));
1919 return current_target.to_remove_breakpoint (¤t_target,
1924 target_info (char *args, int from_tty)
1926 struct target_ops *t;
1927 int has_all_mem = 0;
1929 if (symfile_objfile != NULL)
1930 printf_unfiltered (_("Symbols from \"%s\".\n"),
1931 objfile_name (symfile_objfile));
1933 for (t = target_stack; t != NULL; t = t->beneath)
1935 if (!(*t->to_has_memory) (t))
1938 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1941 printf_unfiltered (_("\tWhile running this, "
1942 "GDB does not access memory from...\n"));
1943 printf_unfiltered ("%s:\n", t->to_longname);
1944 (t->to_files_info) (t);
1945 has_all_mem = (*t->to_has_all_memory) (t);
1949 /* This function is called before any new inferior is created, e.g.
1950 by running a program, attaching, or connecting to a target.
1951 It cleans up any state from previous invocations which might
1952 change between runs. This is a subset of what target_preopen
1953 resets (things which might change between targets). */
1956 target_pre_inferior (int from_tty)
1958 /* Clear out solib state. Otherwise the solib state of the previous
1959 inferior might have survived and is entirely wrong for the new
1960 target. This has been observed on GNU/Linux using glibc 2.3. How
/* NOTE(review): the original comment continues here with an example
   debugging session demonstrating the stale-solib failure; the line
   below is the last line of that example, not code.  */
1972 Cannot access memory at address 0xdeadbeef
1975 /* In some OSs, the shared library list is the same/global/shared
1976 across inferiors. If code is shared between processes, so are
1977 memory regions and features. */
1978 if (!gdbarch_has_global_solist (target_gdbarch ()))
/* Forget shared libraries and user-set memory regions; they belong
   to the previous target.  */
1980 no_shared_libraries (NULL, from_tty);
1982 invalidate_target_mem_regions ();
/* Forget the cached target description...  */
1984 target_clear_description ();
/* ...and re-probe in-process agent capabilities next time.  */
1987 agent_capability_invalidate ();
1990 /* Callback for iterate_over_inferiors. Gets rid of the given
1994 dispose_inferior (struct inferior *inf, void *args)
1996 struct thread_info *thread;
1998 thread = any_thread_of_process (inf->pid);
2001 switch_to_thread (thread->ptid);
2003 /* Core inferiors actually should be detached, not killed. */
2004 if (target_has_execution)
2007 target_detach (NULL, 0);
2013 /* This is to be called by the open routine before it does
2017 target_preopen (int from_tty)
2021 if (have_inferiors ())
2024 || !have_live_inferiors ()
2025 || query (_("A program is being debugged already. Kill it? ")))
2026 iterate_over_inferiors (dispose_inferior, NULL);
2028 error (_("Program not killed."));
2031 /* Calling target_kill may remove the target from the stack. But if
2032 it doesn't (which seems like a win for UDI), remove it now. */
2033 /* Leave the exec target, though. The user may be switching from a
2034 live process to a core of the same program. */
2035 pop_all_targets_above (file_stratum);
2037 target_pre_inferior (from_tty);
2040 /* Detach a target after doing deferred register stores. */
2043 target_detach (const char *args, int from_tty)
2045 struct target_ops* t;
2047 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2048 /* Don't remove global breakpoints here. They're removed on
2049 disconnection from the target. */
2052 /* If we're in breakpoints-always-inserted mode, have to remove
2053 them before detaching. */
2054 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2056 prepare_for_detach ();
2058 current_target.to_detach (¤t_target, args, from_tty);
2062 target_disconnect (const char *args, int from_tty)
2064 /* If we're in breakpoints-always-inserted mode or if breakpoints
2065 are global across processes, we have to remove them before
2067 remove_breakpoints ();
2069 current_target.to_disconnect (¤t_target, args, from_tty);
2073 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2075 return (current_target.to_wait) (¤t_target, ptid, status, options);
2079 target_pid_to_str (ptid_t ptid)
2081 return (*current_target.to_pid_to_str) (¤t_target, ptid);
2085 target_thread_name (struct thread_info *info)
2087 return current_target.to_thread_name (¤t_target, info);
2091 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2093 struct target_ops *t;
2095 target_dcache_invalidate ();
2097 current_target.to_resume (¤t_target, ptid, step, signal);
2099 registers_changed_ptid (ptid);
2100 /* We only set the internal executing state here. The user/frontend
2101 running state is set at a higher level. */
2102 set_executing (ptid, 1);
2103 clear_inline_frame_state (ptid);
2107 target_pass_signals (int numsigs, unsigned char *pass_signals)
2109 (*current_target.to_pass_signals) (¤t_target, numsigs, pass_signals);
2113 target_program_signals (int numsigs, unsigned char *program_signals)
2115 (*current_target.to_program_signals) (¤t_target,
2116 numsigs, program_signals);
static int
default_follow_fork (struct target_ops *self, int follow_child,
		     int detach_fork)
{
  /* Some target returned a fork event, but did not know how to follow it.  */
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow fork"));
}
2128 /* Look through the list of possible targets for a target that can
2132 target_follow_fork (int follow_child, int detach_fork)
2134 return current_target.to_follow_fork (¤t_target,
2135 follow_child, detach_fork);
/* Fallback to_mourn_inferior: reaching here means no pushed target
   implemented mourning, which is an internal inconsistency.  */

static void
default_mourn_inferior (struct target_ops *self)
{
  internal_error (__FILE__, __LINE__,
		  _("could not find a target to follow mourn inferior"));
}
2146 target_mourn_inferior (void)
2148 current_target.to_mourn_inferior (¤t_target);
2150 /* We no longer need to keep handles on any of the object files.
2151 Make sure to release them to avoid unnecessarily locking any
2152 of them while we're not actually debugging. */
2153 bfd_cache_close_all ();
2156 /* Look for a target which can describe architectural features, starting
2157 from TARGET. If we find one, return its description. */
2159 const struct target_desc *
2160 target_read_description (struct target_ops *target)
2162 return target->to_read_description (target);
2165 /* This implements a basic search of memory, reading target memory and
2166 performing the search here (as opposed to performing the search on the
2167 target side with, for example, gdbserver). */
2170 simple_search_memory (struct target_ops *ops,
2171 CORE_ADDR start_addr, ULONGEST search_space_len,
2172 const gdb_byte *pattern, ULONGEST pattern_len,
2173 CORE_ADDR *found_addrp)
2175 /* NOTE: also defined in find.c testcase. */
2176 #define SEARCH_CHUNK_SIZE 16000
2177 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2178 /* Buffer to hold memory contents for searching. */
2179 gdb_byte *search_buf;
2180 unsigned search_buf_size;
2181 struct cleanup *old_cleanups;
/* Oversize the buffer by pattern_len - 1 so matches straddling a
   chunk boundary are still found.  */
2183 search_buf_size = chunk_size + pattern_len - 1;
2185 /* No point in trying to allocate a buffer larger than the search space. */
2186 if (search_space_len < search_buf_size)
2187 search_buf_size = search_space_len;
2189 search_buf = malloc (search_buf_size)
2190 if (search_buf == NULL)
2191 error (_("Unable to allocate memory to perform the search."));
2192 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2194 /* Prime the search buffer. */
2196 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2197 search_buf, start_addr, search_buf_size) != search_buf_size)
2199 warning (_("Unable to access %s bytes of target "
2200 "memory at %s, halting search."),
2201 pulongest (search_buf_size), hex_string (start_addr));
2202 do_cleanups (old_cleanups);
2206 /* Perform the search.
2208 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2209 When we've scanned N bytes we copy the trailing bytes to the start and
2210 read in another N bytes. */
2212 while (search_space_len >= pattern_len)
2214 gdb_byte *found_ptr;
2215 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2217 found_ptr = memmem (search_buf, nr_search_bytes,
2218 pattern, pattern_len);
2220 if (found_ptr != NULL)
2222 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2224 *found_addrp = found_addr;
2225 do_cleanups (old_cleanups);
2229 /* Not found in this chunk, skip to next chunk. */
2231 /* Don't let search_space_len wrap here, it's unsigned. */
2232 if (search_space_len >= chunk_size)
2233 search_space_len -= chunk_size;
2235 search_space_len = 0;
2237 if (search_space_len >= pattern_len)
2239 unsigned keep_len = search_buf_size - chunk_size;
/* Next unread byte is one full buffer past start_addr.  */
2240 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2243 /* Copy the trailing part of the previous iteration to the front
2244 of the buffer for the next iteration. */
2245 gdb_assert (keep_len == pattern_len - 1);
2246 memcpy (search_buf, search_buf + chunk_size, keep_len);
2248 nr_to_read = min (search_space_len - keep_len, chunk_size);
2250 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2251 search_buf + keep_len, read_addr,
2252 nr_to_read) != nr_to_read)
2254 warning (_("Unable to access %s bytes of target "
2255 "memory at %s, halting search."),
2256 plongest (nr_to_read),
2257 hex_string (read_addr));
2258 do_cleanups (old_cleanups);
2262 start_addr += chunk_size;
/* Exhausted the search space without a match.  Result codes per the
   target_search_memory contract: 1 found, 0 not found, -1 error.  */
2268 do_cleanups (old_cleanups);
2272 /* Default implementation of memory-searching. */
2275 default_search_memory (struct target_ops *self,
2276 CORE_ADDR start_addr, ULONGEST search_space_len,
2277 const gdb_byte *pattern, ULONGEST pattern_len,
2278 CORE_ADDR *found_addrp)
2280 /* Start over from the top of the target stack. */
2281 return simple_search_memory (current_target.beneath,
2282 start_addr, search_space_len,
2283 pattern, pattern_len, found_addrp);
2286 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2287 sequence of bytes in PATTERN with length PATTERN_LEN.
2289 The result is 1 if found, 0 if not found, and -1 if there was an error
2290 requiring halting of the search (e.g. memory read error).
2291 If the pattern is found the address is recorded in FOUND_ADDRP. */
2294 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2295 const gdb_byte *pattern, ULONGEST pattern_len,
2296 CORE_ADDR *found_addrp)
2298 return current_target.to_search_memory (¤t_target, start_addr,
2300 pattern, pattern_len, found_addrp);
2303 /* Look through the currently pushed targets. If none of them will
2304 be able to restart the currently running process, issue an error
2308 target_require_runnable (void)
2310 struct target_ops *t;
2312 for (t = target_stack; t != NULL; t = t->beneath)
2314 /* If this target knows how to create a new program, then
2315 assume we will still be able to after killing the current
2316 one. Either killing and mourning will not pop T, or else
2317 find_default_run_target will find it again. */
2318 if (t->to_create_inferior != NULL)
2321 /* Do not worry about targets at certain strata that can not
2322 create inferiors. Assume they will be pushed again if
2323 necessary, and continue to the process_stratum. */
2324 if (t->to_stratum == thread_stratum
2325 || t->to_stratum == record_stratum
2326 || t->to_stratum == arch_stratum)
2329 error (_("The \"%s\" target does not support \"run\". "
2330 "Try \"help target\" or \"continue\"."),
2334 /* This function is only called if the target is running. In that
2335 case there should have been a process_stratum target and it
2336 should either know how to create inferiors, or not... */
2337 internal_error (__FILE__, __LINE__, _("No targets found"));
/* Whether GDB is allowed to fall back to the default run target for
   "run", "attach", etc. when no target is connected yet.  */
static int auto_connect_native_target = 1;

/* "show" callback for the auto-connect-native-target setting.  */

static void
show_auto_connect_native_target (struct ui_file *file, int from_tty,
				 struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
		    _("Whether GDB may automatically connect to the "
		      "native target is %s.\n"),
		    value);
}
2354 /* Look through the list of possible targets for a target that can
2355 execute a run or attach command without any other data. This is
2356 used to locate the default process stratum.
2358 If DO_MESG is not NULL, the result is always valid (error() is
2359 called for errors); else, return NULL on error. */
2361 static struct target_ops *
2362 find_default_run_target (char *do_mesg)
2364 struct target_ops *runable = NULL;
/* Only consider the registered target list at all when the user has
   not disabled auto-connection to the native target.  */
2366 if (auto_connect_native_target)
2368 struct target_ops **t;
2371 for (t = target_structs; t < target_structs + target_struct_size;
/* A candidate must override to_can_run (i.e. not use the delegator
   default) AND currently report that it can run.  */
2374 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
/* No (unique) runnable target found: report or return NULL per the
   DO_MESG contract above.  */
2385 if (runable == NULL)
2388 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2399 find_attach_target (void)
2401 struct target_ops *t;
2403 /* If a target on the current stack can attach, use it. */
2404 for (t = current_target.beneath; t != NULL; t = t->beneath)
2406 if (t->to_attach != NULL)
2410 /* Otherwise, use the default run target for attaching. */
2412 t = find_default_run_target ("attach");
2420 find_run_target (void)
2422 struct target_ops *t;
2424 /* If a target on the current stack can attach, use it. */
2425 for (t = current_target.beneath; t != NULL; t = t->beneath)
2427 if (t->to_create_inferior != NULL)
2431 /* Otherwise, use the default run target. */
2433 t = find_default_run_target ("run");
2438 /* Implement the "info proc" command. */
2441 target_info_proc (const char *args, enum info_proc_what what)
2443 struct target_ops *t;
2445 /* If we're already connected to something that can get us OS
2446 related data, use it. Otherwise, try using the native
2448 if (current_target.to_stratum >= process_stratum)
2449 t = current_target.beneath;
2451 t = find_default_run_target (NULL);
2453 for (; t != NULL; t = t->beneath)
2455 if (t->to_info_proc != NULL)
2457 t->to_info_proc (t, args, what);
2460 fprintf_unfiltered (gdb_stdlog,
2461 "target_info_proc (\"%s\", %d)\n", args, what);
2471 find_default_supports_disable_randomization (struct target_ops *self)
2473 struct target_ops *t;
2475 t = find_default_run_target (NULL);
2476 if (t && t->to_supports_disable_randomization)
2477 return (t->to_supports_disable_randomization) (t);
2482 target_supports_disable_randomization (void)
2484 struct target_ops *t;
2486 for (t = ¤t_target; t != NULL; t = t->beneath)
2487 if (t->to_supports_disable_randomization)
2488 return t->to_supports_disable_randomization (t);
2494 target_get_osdata (const char *type)
2496 struct target_ops *t;
2498 /* If we're already connected to something that can get us OS
2499 related data, use it. Otherwise, try using the native
2501 if (current_target.to_stratum >= process_stratum)
2502 t = current_target.beneath;
2504 t = find_default_run_target ("get OS data");
2509 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2512 static struct address_space *
2513 default_thread_address_space (struct target_ops *self, ptid_t ptid)
2515 struct inferior *inf;
2517 /* Fall-back to the "main" address space of the inferior. */
2518 inf = find_inferior_pid (ptid_get_pid (ptid));
2520 if (inf == NULL || inf->aspace == NULL)
2521 internal_error (__FILE__, __LINE__,
2522 _("Can't determine the current "
2523 "address space of thread %s\n"),
2524 target_pid_to_str (ptid));
2529 /* Determine the current address space of thread PTID. */
2531 struct address_space *
2532 target_thread_address_space (ptid_t ptid)
2534 struct address_space *aspace;
2536 aspace = current_target.to_thread_address_space (¤t_target, ptid);
2537 gdb_assert (aspace != NULL);
2543 /* Target file operations. */
2545 static struct target_ops *
2546 default_fileio_target (void)
2548 /* If we're already connected to something that can perform
2549 file I/O, use it. Otherwise, try using the native target. */
2550 if (current_target.to_stratum >= process_stratum)
2551 return current_target.beneath;
2553 return find_default_run_target ("file I/O");
2556 /* Open FILENAME on the target, using FLAGS and MODE. Return a
2557 target file descriptor, or -1 if an error occurs (and set
2560 target_fileio_open (const char *filename, int flags, int mode,
2563 struct target_ops *t;
2565 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2567 if (t->to_fileio_open != NULL)
2569 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
2572 fprintf_unfiltered (gdb_stdlog,
2573 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
2574 filename, flags, mode,
2575 fd, fd != -1 ? 0 : *target_errno);
2580 *target_errno = FILEIO_ENOSYS;
2584 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
2585 Return the number of bytes written, or -1 if an error occurs
2586 (and set *TARGET_ERRNO). */
2588 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2589 ULONGEST offset, int *target_errno)
2591 struct target_ops *t;
2593 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2595 if (t->to_fileio_pwrite != NULL)
2597 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
2601 fprintf_unfiltered (gdb_stdlog,
2602 "target_fileio_pwrite (%d,...,%d,%s) "
2604 fd, len, pulongest (offset),
2605 ret, ret != -1 ? 0 : *target_errno);
2610 *target_errno = FILEIO_ENOSYS;
2614 /* Read up to LEN bytes FD on the target into READ_BUF.
2615 Return the number of bytes read, or -1 if an error occurs
2616 (and set *TARGET_ERRNO). */
2618 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2619 ULONGEST offset, int *target_errno)
2621 struct target_ops *t;
2623 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2625 if (t->to_fileio_pread != NULL)
2627 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
2631 fprintf_unfiltered (gdb_stdlog,
2632 "target_fileio_pread (%d,...,%d,%s) "
2634 fd, len, pulongest (offset),
2635 ret, ret != -1 ? 0 : *target_errno);
2640 *target_errno = FILEIO_ENOSYS;
2644 /* Close FD on the target. Return 0, or -1 if an error occurs
2645 (and set *TARGET_ERRNO). */
2647 target_fileio_close (int fd, int *target_errno)
2649 struct target_ops *t;
2651 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2653 if (t->to_fileio_close != NULL)
2655 int ret = t->to_fileio_close (t, fd, target_errno);
2658 fprintf_unfiltered (gdb_stdlog,
2659 "target_fileio_close (%d) = %d (%d)\n",
2660 fd, ret, ret != -1 ? 0 : *target_errno);
2665 *target_errno = FILEIO_ENOSYS;
2669 /* Unlink FILENAME on the target. Return 0, or -1 if an error
2670 occurs (and set *TARGET_ERRNO). */
2672 target_fileio_unlink (const char *filename, int *target_errno)
2674 struct target_ops *t;
2676 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2678 if (t->to_fileio_unlink != NULL)
2680 int ret = t->to_fileio_unlink (t, filename, target_errno);
2683 fprintf_unfiltered (gdb_stdlog,
2684 "target_fileio_unlink (%s) = %d (%d)\n",
2685 filename, ret, ret != -1 ? 0 : *target_errno);
2690 *target_errno = FILEIO_ENOSYS;
2694 /* Read value of symbolic link FILENAME on the target. Return a
2695 null-terminated string allocated via xmalloc, or NULL if an error
2696 occurs (and set *TARGET_ERRNO). */
2698 target_fileio_readlink (const char *filename, int *target_errno)
2700 struct target_ops *t;
2702 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2704 if (t->to_fileio_readlink != NULL)
2706 char *ret = t->to_fileio_readlink (t, filename, target_errno);
2709 fprintf_unfiltered (gdb_stdlog,
2710 "target_fileio_readlink (%s) = %s (%d)\n",
2711 filename, ret? ret : "(nil)",
2712 ret? 0 : *target_errno);
2717 *target_errno = FILEIO_ENOSYS;
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any error.  Registered via make_cleanup so the
   descriptor is released even if an error is thrown mid-read.  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
2730 /* Read target file FILENAME. Store the result in *BUF_P and
2731 return the size of the transferred data. PADDING additional bytes are
2732 available in *BUF_P. This is a helper function for
2733 target_fileio_read_alloc; see the declaration of that function for more
2737 target_fileio_read_alloc_1 (const char *filename,
2738 gdb_byte **buf_p, int padding)
2740 struct cleanup *close_cleanup;
2741 size_t buf_alloc, buf_pos;
2747 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
2751 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
2753 /* Start by reading up to 4K at a time. The target will throttle
2754 this number down if necessary. */
2756 buf = xmalloc (buf_alloc);
2760 n = target_fileio_pread (fd, &buf[buf_pos],
2761 buf_alloc - buf_pos - padding, buf_pos,
2765 /* An error occurred. */
2766 do_cleanups (close_cleanup);
2772 /* Read all there was. */
2773 do_cleanups (close_cleanup);
2783 /* If the buffer is filling up, expand it. */
2784 if (buf_alloc < buf_pos * 2)
2787 buf = xrealloc (buf, buf_alloc);
2794 /* Read target file FILENAME. Store the result in *BUF_P and return
2795 the size of the transferred data. See the declaration in "target.h"
2796 function for more information about the return value. */
2799 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
2801 return target_fileio_read_alloc_1 (filename, buf_p, 0);
2804 /* Read target file FILENAME. The result is NUL-terminated and
2805 returned as a string, allocated using xmalloc. If an error occurs
2806 or the transfer is unsupported, NULL is returned. Empty objects
2807 are returned as allocated but empty strings. A warning is issued
2808 if the result contains any embedded NUL bytes. */
2811 target_fileio_read_stralloc (const char *filename)
2815 LONGEST i, transferred;
2817 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
2818 bufstr = (char *) buffer;
2820 if (transferred < 0)
2823 if (transferred == 0)
2824 return xstrdup ("");
2826 bufstr[transferred] = 0;
2828 /* Check for embedded NUL bytes; but allow trailing NULs. */
2829 for (i = strlen (bufstr); i < transferred; i++)
2832 warning (_("target file %s "
2833 "contained unexpected null characters"),
2843 default_region_ok_for_hw_watchpoint (struct target_ops *self,
2844 CORE_ADDR addr, int len)
2846 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
2850 default_watchpoint_addr_within_range (struct target_ops *target,
2852 CORE_ADDR start, int length)
2854 return addr >= start && addr < start + length;
2857 static struct gdbarch *
2858 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2860 return target_gdbarch ();
2864 return_zero (struct target_ops *ignore)
2870 return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
2876 * Find the next target down the stack from the specified target.
2880 find_target_beneath (struct target_ops *t)
2888 find_target_at (enum strata stratum)
2890 struct target_ops *t;
2892 for (t = current_target.beneath; t != NULL; t = t->beneath)
2893 if (t->to_stratum == stratum)
2900 /* The inferior process has died. Long live the inferior! */
2903 generic_mourn_inferior (void)
2907 ptid = inferior_ptid;
2908 inferior_ptid = null_ptid;
2910 /* Mark breakpoints uninserted in case something tries to delete a
2911 breakpoint while we delete the inferior's threads (which would
2912 fail, since the inferior is long gone). */
2913 mark_breakpoints_out ();
2915 if (!ptid_equal (ptid, null_ptid))
2917 int pid = ptid_get_pid (ptid);
2918 exit_inferior (pid);
2921 /* Note this wipes step-resume breakpoints, so needs to be done
2922 after exit_inferior, which ends up referencing the step-resume
2923 breakpoints through clear_thread_inferior_resources. */
2924 breakpoint_init_inferior (inf_exited);
2926 registers_changed ();
2928 reopen_exec_file ();
2929 reinit_frame_cache ();
2931 if (deprecated_detach_hook)
2932 deprecated_detach_hook ();
2935 /* Convert a normal process ID to a string. Returns the string in a
2939 normal_pid_to_str (ptid_t ptid)
2941 static char buf[32];
2943 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2948 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
2950 return normal_pid_to_str (ptid);
2953 /* Error-catcher for target_find_memory_regions. */
2955 dummy_find_memory_regions (struct target_ops *self,
2956 find_memory_region_ftype ignore1, void *ignore2)
2958 error (_("Command not implemented for this target."));
2962 /* Error-catcher for target_make_corefile_notes. */
2964 dummy_make_corefile_notes (struct target_ops *self,
2965 bfd *ignore1, int *ignore2)
2967 error (_("Command not implemented for this target."));
2971 /* Set up the handful of non-empty slots needed by the dummy target
2975 init_dummy_target (void)
2977 dummy_target.to_shortname = "None";
2978 dummy_target.to_longname = "None";
2979 dummy_target.to_doc = "";
2980 dummy_target.to_supports_disable_randomization
2981 = find_default_supports_disable_randomization;
2982 dummy_target.to_stratum = dummy_stratum;
2983 dummy_target.to_has_all_memory = return_zero;
2984 dummy_target.to_has_memory = return_zero;
2985 dummy_target.to_has_stack = return_zero;
2986 dummy_target.to_has_registers = return_zero;
2987 dummy_target.to_has_execution = return_zero_has_execution;
2988 dummy_target.to_magic = OPS_MAGIC;
2990 install_dummy_methods (&dummy_target);
2995 target_close (struct target_ops *targ)
2997 gdb_assert (!target_is_pushed (targ));
2999 if (targ->to_xclose != NULL)
3000 targ->to_xclose (targ);
3001 else if (targ->to_close != NULL)
3002 targ->to_close (targ);
3005 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3009 target_thread_alive (ptid_t ptid)
3011 return current_target.to_thread_alive (¤t_target, ptid);
3015 target_find_new_threads (void)
3017 current_target.to_find_new_threads (¤t_target);
3021 target_stop (ptid_t ptid)
3025 warning (_("May not interrupt or stop the target, ignoring attempt"));
3029 (*current_target.to_stop) (¤t_target, ptid);
3032 /* Concatenate ELEM to LIST, a comma separate list, and return the
3033 result. The LIST incoming argument is released. */
3036 str_comma_list_concat_elem (char *list, const char *elem)
3039 return xstrdup (elem);
3041 return reconcat (list, list, ", ", elem, (char *) NULL);
/* Helper for target_options_to_string.  If OPT is present in
   TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
   Returns the new resulting string.  OPT is removed from
   TARGET_OPTIONS.  */

static char *
do_option (int *target_options, char *ret,
           int opt, char *opt_str)
{
  if ((*target_options & opt) != 0)
    {
      ret = str_comma_list_concat_elem (ret, opt_str);
      *target_options &= ~opt;
    }

  return ret;
}
3063 target_options_to_string (int target_options)
3067 #define DO_TARG_OPTION(OPT) \
3068 ret = do_option (&target_options, ret, OPT, #OPT)
3070 DO_TARG_OPTION (TARGET_WNOHANG);
3072 if (target_options != 0)
3073 ret = str_comma_list_concat_elem (ret, "unknown???");
3081 debug_print_register (const char * func,
3082 struct regcache *regcache, int regno)
3084 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3086 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3087 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3088 && gdbarch_register_name (gdbarch, regno) != NULL
3089 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3090 fprintf_unfiltered (gdb_stdlog, "(%s)",
3091 gdbarch_register_name (gdbarch, regno));
3093 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3094 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3096 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3097 int i, size = register_size (gdbarch, regno);
3098 gdb_byte buf[MAX_REGISTER_SIZE];
3100 regcache_raw_collect (regcache, regno, buf);
3101 fprintf_unfiltered (gdb_stdlog, " = ");
3102 for (i = 0; i < size; i++)
3104 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3106 if (size <= sizeof (LONGEST))
3108 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3110 fprintf_unfiltered (gdb_stdlog, " %s %s",
3111 core_addr_to_string_nz (val), plongest (val));
3114 fprintf_unfiltered (gdb_stdlog, "\n");
3118 target_fetch_registers (struct regcache *regcache, int regno)
3120 current_target.to_fetch_registers (¤t_target, regcache, regno);
3122 debug_print_register ("target_fetch_registers", regcache, regno);
3126 target_store_registers (struct regcache *regcache, int regno)
3128 struct target_ops *t;
3130 if (!may_write_registers)
3131 error (_("Writing to registers is not allowed (regno %d)"), regno);
3133 current_target.to_store_registers (¤t_target, regcache, regno);
3136 debug_print_register ("target_store_registers", regcache, regno);
3141 target_core_of_thread (ptid_t ptid)
3143 return current_target.to_core_of_thread (¤t_target, ptid);
3147 simple_verify_memory (struct target_ops *ops,
3148 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3150 LONGEST total_xfered = 0;
3152 while (total_xfered < size)
3154 ULONGEST xfered_len;
3155 enum target_xfer_status status;
3157 ULONGEST howmuch = min (sizeof (buf), size - total_xfered);
3159 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3160 buf, NULL, lma + total_xfered, howmuch,
3162 if (status == TARGET_XFER_OK
3163 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3165 total_xfered += xfered_len;
3174 /* Default implementation of memory verification. */
3177 default_verify_memory (struct target_ops *self,
3178 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3180 /* Start over from the top of the target stack. */
3181 return simple_verify_memory (current_target.beneath,
3182 data, memaddr, size);
3186 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3188 return current_target.to_verify_memory (¤t_target,
3189 data, memaddr, size);
3192 /* The documentation for this function is in its prototype declaration in
3196 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3198 return current_target.to_insert_mask_watchpoint (¤t_target,
3202 /* The documentation for this function is in its prototype declaration in
3206 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3208 return current_target.to_remove_mask_watchpoint (¤t_target,
3212 /* The documentation for this function is in its prototype declaration
3216 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3218 return current_target.to_masked_watch_num_registers (¤t_target,
3222 /* The documentation for this function is in its prototype declaration
3226 target_ranged_break_num_registers (void)
3228 return current_target.to_ranged_break_num_registers (¤t_target);
3233 struct btrace_target_info *
3234 target_enable_btrace (ptid_t ptid)
3236 return current_target.to_enable_btrace (¤t_target, ptid);
3242 target_disable_btrace (struct btrace_target_info *btinfo)
3244 current_target.to_disable_btrace (¤t_target, btinfo);
3250 target_teardown_btrace (struct btrace_target_info *btinfo)
3252 current_target.to_teardown_btrace (¤t_target, btinfo);
3258 target_read_btrace (VEC (btrace_block_s) **btrace,
3259 struct btrace_target_info *btinfo,
3260 enum btrace_read_type type)
3262 return current_target.to_read_btrace (¤t_target, btrace, btinfo, type);
3268 target_stop_recording (void)
3270 current_target.to_stop_recording (¤t_target);
3276 target_save_record (const char *filename)
3278 current_target.to_save_record (¤t_target, filename);
3284 target_supports_delete_record (void)
3286 struct target_ops *t;
3288 for (t = current_target.beneath; t != NULL; t = t->beneath)
3289 if (t->to_delete_record != delegate_delete_record
3290 && t->to_delete_record != tdefault_delete_record)
3299 target_delete_record (void)
3301 current_target.to_delete_record (¤t_target);
3307 target_record_is_replaying (void)
3309 return current_target.to_record_is_replaying (¤t_target);
3315 target_goto_record_begin (void)
3317 current_target.to_goto_record_begin (¤t_target);
3323 target_goto_record_end (void)
3325 current_target.to_goto_record_end (¤t_target);
3331 target_goto_record (ULONGEST insn)
3333 current_target.to_goto_record (¤t_target, insn);
3339 target_insn_history (int size, int flags)
3341 current_target.to_insn_history (¤t_target, size, flags);
3347 target_insn_history_from (ULONGEST from, int size, int flags)
3349 current_target.to_insn_history_from (¤t_target, from, size, flags);
3355 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
3357 current_target.to_insn_history_range (¤t_target, begin, end, flags);
3363 target_call_history (int size, int flags)
3365 current_target.to_call_history (¤t_target, size, flags);
3371 target_call_history_from (ULONGEST begin, int size, int flags)
3373 current_target.to_call_history_from (¤t_target, begin, size, flags);
3379 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
3381 current_target.to_call_history_range (¤t_target, begin, end, flags);
3386 const struct frame_unwind *
3387 target_get_unwinder (void)
3389 return current_target.to_get_unwinder (¤t_target);
3394 const struct frame_unwind *
3395 target_get_tailcall_unwinder (void)
3397 return current_target.to_get_tailcall_unwinder (¤t_target);
3400 /* Default implementation of to_decr_pc_after_break. */
3403 default_target_decr_pc_after_break (struct target_ops *ops,
3404 struct gdbarch *gdbarch)
3406 return gdbarch_decr_pc_after_break (gdbarch);
3412 target_decr_pc_after_break (struct gdbarch *gdbarch)
3414 return current_target.to_decr_pc_after_break (¤t_target, gdbarch);
3420 target_prepare_to_generate_core (void)
3422 current_target.to_prepare_to_generate_core (¤t_target);
3428 target_done_generating_core (void)
3430 current_target.to_done_generating_core (¤t_target);
3434 setup_target_debug (void)
3436 memcpy (&debug_target, ¤t_target, sizeof debug_target);
3438 init_debug_target (¤t_target);
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
  "Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
/* Default implementation of to_rcmd: no monitor to talk to.  */

static void
default_rcmd (struct target_ops *self, const char *command,
              struct ui_file *output)
{
  error (_("\"monitor\" command not supported by this target."));
}
3455 do_monitor_command (char *cmd,
3458 target_rcmd (cmd, gdb_stdtarg);
3461 /* Print the name of each layers of our target stack. */
3464 maintenance_print_target_stack (char *cmd, int from_tty)
3466 struct target_ops *t;
3468 printf_filtered (_("The current target stack is:\n"));
3470 for (t = target_stack; t != NULL; t = t->beneath)
3472 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
/* Controls if targets can report that they can/are async.  This is
   just for maintainers to use when debugging gdb.  */
int target_async_permitted = 1;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 1;
3485 maint_set_target_async_command (char *args, int from_tty,
3486 struct cmd_list_element *c)
3488 if (have_live_inferiors ())
3490 target_async_permitted_1 = target_async_permitted;
3491 error (_("Cannot change this setting while the inferior is running."));
3494 target_async_permitted = target_async_permitted_1;
/* "maint show target-async" callback.  */

static void
maint_show_target_async_command (struct ui_file *file, int from_tty,
                                 struct cmd_list_element *c,
                                 const char *value)
{
  fprintf_filtered (file,
                    _("Controlling the inferior in "
                      "asynchronous mode is %s.\n"), value);
}
/* Temporary copies of permission settings.  The "set" commands write
   these; set_target_permissions copies them to the real flags.  */

static int may_write_registers_1 = 1;
static int may_write_memory_1 = 1;
static int may_insert_breakpoints_1 = 1;
static int may_insert_tracepoints_1 = 1;
static int may_insert_fast_tracepoints_1 = 1;
static int may_stop_1 = 1;
3516 /* Make the user-set values match the real values again. */
3519 update_target_permissions (void)
3521 may_write_registers_1 = may_write_registers;
3522 may_write_memory_1 = may_write_memory;
3523 may_insert_breakpoints_1 = may_insert_breakpoints;
3524 may_insert_tracepoints_1 = may_insert_tracepoints;
3525 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3526 may_stop_1 = may_stop;
3529 /* The one function handles (most of) the permission flags in the same
3533 set_target_permissions (char *args, int from_tty,
3534 struct cmd_list_element *c)
3536 if (target_has_execution)
3538 update_target_permissions ();
3539 error (_("Cannot change this setting while the inferior is running."));
3542 /* Make the real values match the user-changed values. */
3543 may_write_registers = may_write_registers_1;
3544 may_insert_breakpoints = may_insert_breakpoints_1;
3545 may_insert_tracepoints = may_insert_tracepoints_1;
3546 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
3547 may_stop = may_stop_1;
3548 update_observer_mode ();
3551 /* Set memory write permission independently of observer mode. */
3554 set_write_memory_permission (char *args, int from_tty,
3555 struct cmd_list_element *c)
3557 /* Make the real values match the user-changed values. */
3558 may_write_memory = may_write_memory_1;
3559 update_observer_mode ();
3564 initialize_targets (void)
3566 init_dummy_target ();
3567 push_target (&dummy_target);
3569 add_info ("target", target_info, targ_desc);
3570 add_info ("files", target_info, targ_desc);
3572 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3573 Set target debugging."), _("\
3574 Show target debugging."), _("\
3575 When non-zero, target debugging is enabled. Higher numbers are more\n\
3579 &setdebuglist, &showdebuglist);
3581 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3582 &trust_readonly, _("\
3583 Set mode for reading from readonly sections."), _("\
3584 Show mode for reading from readonly sections."), _("\
3585 When this mode is on, memory reads from readonly sections (such as .text)\n\
3586 will be read from the object file instead of from the target. This will\n\
3587 result in significant performance improvement for remote targets."),
3589 show_trust_readonly,
3590 &setlist, &showlist);
3592 add_com ("monitor", class_obscure, do_monitor_command,
3593 _("Send a command to the remote monitor (remote targets only)."));
3595 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3596 _("Print the name of each layer of the internal target stack."),
3597 &maintenanceprintlist);
3599 add_setshow_boolean_cmd ("target-async", no_class,
3600 &target_async_permitted_1, _("\
3601 Set whether gdb controls the inferior in asynchronous mode."), _("\
3602 Show whether gdb controls the inferior in asynchronous mode."), _("\
3603 Tells gdb whether to control the inferior in asynchronous mode."),
3604 maint_set_target_async_command,
3605 maint_show_target_async_command,
3606 &maintenance_set_cmdlist,
3607 &maintenance_show_cmdlist);
3609 add_setshow_boolean_cmd ("may-write-registers", class_support,
3610 &may_write_registers_1, _("\
3611 Set permission to write into registers."), _("\
3612 Show permission to write into registers."), _("\
3613 When this permission is on, GDB may write into the target's registers.\n\
3614 Otherwise, any sort of write attempt will result in an error."),
3615 set_target_permissions, NULL,
3616 &setlist, &showlist);
3618 add_setshow_boolean_cmd ("may-write-memory", class_support,
3619 &may_write_memory_1, _("\
3620 Set permission to write into target memory."), _("\
3621 Show permission to write into target memory."), _("\
3622 When this permission is on, GDB may write into the target's memory.\n\
3623 Otherwise, any sort of write attempt will result in an error."),
3624 set_write_memory_permission, NULL,
3625 &setlist, &showlist);
3627 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
3628 &may_insert_breakpoints_1, _("\
3629 Set permission to insert breakpoints in the target."), _("\
3630 Show permission to insert breakpoints in the target."), _("\
3631 When this permission is on, GDB may insert breakpoints in the program.\n\
3632 Otherwise, any sort of insertion attempt will result in an error."),
3633 set_target_permissions, NULL,
3634 &setlist, &showlist);
3636 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
3637 &may_insert_tracepoints_1, _("\
3638 Set permission to insert tracepoints in the target."), _("\
3639 Show permission to insert tracepoints in the target."), _("\
3640 When this permission is on, GDB may insert tracepoints in the program.\n\
3641 Otherwise, any sort of insertion attempt will result in an error."),
3642 set_target_permissions, NULL,
3643 &setlist, &showlist);
3645 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
3646 &may_insert_fast_tracepoints_1, _("\
3647 Set permission to insert fast tracepoints in the target."), _("\
3648 Show permission to insert fast tracepoints in the target."), _("\
3649 When this permission is on, GDB may insert fast tracepoints.\n\
3650 Otherwise, any sort of insertion attempt will result in an error."),
3651 set_target_permissions, NULL,
3652 &setlist, &showlist);
3654 add_setshow_boolean_cmd ("may-interrupt", class_support,
3656 Set permission to interrupt or signal the target."), _("\
3657 Show permission to interrupt or signal the target."), _("\
3658 When this permission is on, GDB may interrupt/stop the target's execution.\n\
3659 Otherwise, any attempt to interrupt or stop will be ignored."),
3660 set_target_permissions, NULL,
3661 &setlist, &showlist);
3663 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
3664 &auto_connect_native_target, _("\
3665 Set whether GDB may automatically connect to the native target."), _("\
3666 Show whether GDB may automatically connect to the native target."), _("\
3667 When on, and GDB is not connected to a target yet, GDB\n\
3668 attempts \"run\" and other commands with the native target."),
3669 NULL, show_auto_connect_native_target,
3670 &setlist, &showlist);