1 /* Select target systems and architectures at runtime for GDB.
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
5 Contributed by Cygnus Support.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
26 #include "target-dcache.h"
37 #include "gdb_assert.h"
39 #include "exceptions.h"
40 #include "target-descriptions.h"
41 #include "gdbthread.h"
44 #include "inline-frame.h"
45 #include "tracepoint.h"
46 #include "gdb/fileio.h"
49 #include "target-debug.h"
51 static void target_info (char *, int);
53 static void generic_tls_error (void) ATTRIBUTE_NORETURN;
55 static void default_terminal_info (struct target_ops *, const char *, int);
57 static int default_watchpoint_addr_within_range (struct target_ops *,
58 CORE_ADDR, CORE_ADDR, int);
60 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
63 static void default_rcmd (struct target_ops *, const char *, struct ui_file *);
65 static ptid_t default_get_ada_task_ptid (struct target_ops *self,
68 static int default_follow_fork (struct target_ops *self, int follow_child,
71 static void default_mourn_inferior (struct target_ops *self);
73 static int default_search_memory (struct target_ops *ops,
75 ULONGEST search_space_len,
76 const gdb_byte *pattern,
78 CORE_ADDR *found_addrp);
80 static int default_verify_memory (struct target_ops *self,
82 CORE_ADDR memaddr, ULONGEST size);
84 static struct address_space *default_thread_address_space
85 (struct target_ops *self, ptid_t ptid);
87 static void tcomplain (void) ATTRIBUTE_NORETURN;
89 static int return_zero (struct target_ops *);
91 static int return_zero_has_execution (struct target_ops *, ptid_t);
93 static void target_command (char *, int);
95 static struct target_ops *find_default_run_target (char *);
97 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
100 static int dummy_find_memory_regions (struct target_ops *self,
101 find_memory_region_ftype ignore1,
104 static char *dummy_make_corefile_notes (struct target_ops *self,
105 bfd *ignore1, int *ignore2);
107 static char *default_pid_to_str (struct target_ops *ops, ptid_t ptid);
109 static enum exec_direction_kind default_execution_direction
110 (struct target_ops *self);
112 static CORE_ADDR default_target_decr_pc_after_break (struct target_ops *ops,
113 struct gdbarch *gdbarch);
115 static struct target_ops debug_target;
117 #include "target-delegates.c"
119 static void init_dummy_target (void);
121 static void update_current_target (void);
/* NOTE(review): this listing is a damaged extraction — each line carries a
   stray original-line-number prefix, and some lines are missing.  */
123 /* Pointer to array of target architecture structures; the size of the
124 array; the current index into the array; the allocated size of the
126 struct target_ops **target_structs;
127 unsigned target_struct_size;
128 unsigned target_struct_allocsize;
/* Initial capacity of the target_structs array; doubled on growth
   (see add_target_with_completer below).  */
129 #define DEFAULT_ALLOCSIZE 10
131 /* The initial current target, so that there is always a semi-valid
134 static struct target_ops dummy_target;
136 /* Top of target stack. */
138 static struct target_ops *target_stack;
140 /* The target structure we are currently using to talk to a process
141 or file or whatever "inferior" we have. */
143 struct target_ops current_target;
145 /* Command list for target. */
147 static struct cmd_list_element *targetlist = NULL;
149 /* Nonzero if we should trust readonly sections from the
150 executable when reading memory. */
152 static int trust_readonly = 0;
154 /* Nonzero if we should show true memory content including
155 memory breakpoint inserted by gdb. */
157 static int show_memory_breakpoints = 0;
159 /* These globals control whether GDB attempts to perform these
160 operations; they are useful for targets that need to prevent
161 inadvertant disruption, such as in non-stop mode. */
163 int may_write_registers = 1;
165 int may_write_memory = 1;
167 int may_insert_breakpoints = 1;
169 int may_insert_tracepoints = 1;
171 int may_insert_fast_tracepoints = 1;
175 /* Non-zero if we want to see trace of target level stuff. */
177 static unsigned int targetdebug = 0;
/* "set debug target" hook: refresh the flattened current_target so the
   debug wrappers are (re)installed.  NOTE(review): surrounding lines are
   missing from this extraction.  */
180 set_targetdebug (char *args, int from_tty, struct cmd_list_element *c)
182 update_current_target ();
/* "show debug target" hook: print the current setting VALUE.  */
186 show_targetdebug (struct ui_file *file, int from_tty,
187 struct cmd_list_element *c, const char *value)
189 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
192 static void setup_target_debug (void);
194 /* The user just typed 'target' without the name of a target. */
/* Complain that a target name argument is required.  */
197 target_command (char *arg, int from_tty)
199 fputs_filtered ("Argument required (target name). Try `help target'\n",
203 /* Default target_has_* methods for process_stratum targets. */
/* Each default_child_has_* predicate below answers "no" when no
   inferior/thread is selected; the success-path lines are elided in
   this extraction.  */
206 default_child_has_all_memory (struct target_ops *ops)
208 /* If no inferior selected, then we can't read memory here. */
209 if (ptid_equal (inferior_ptid, null_ptid))
216 default_child_has_memory (struct target_ops *ops)
218 /* If no inferior selected, then we can't read memory here. */
219 if (ptid_equal (inferior_ptid, null_ptid))
226 default_child_has_stack (struct target_ops *ops)
228 /* If no inferior selected, there's no stack. */
229 if (ptid_equal (inferior_ptid, null_ptid))
236 default_child_has_registers (struct target_ops *ops)
238 /* Can't read registers from no inferior. */
239 if (ptid_equal (inferior_ptid, null_ptid))
246 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
248 /* If there's no thread selected, then we can't make it run through
250 if (ptid_equal (the_ptid, null_ptid))
/* The target_has_*_1 helpers walk the target stack from just beneath
   the flattened current_target and ask each stratum in turn.  */
258 target_has_all_memory_1 (void)
260 struct target_ops *t;
262 for (t = current_target.beneath; t != NULL; t = t->beneath)
263 if (t->to_has_all_memory (t))
270 target_has_memory_1 (void)
272 struct target_ops *t;
274 for (t = current_target.beneath; t != NULL; t = t->beneath)
275 if (t->to_has_memory (t))
282 target_has_stack_1 (void)
284 struct target_ops *t;
286 for (t = current_target.beneath; t != NULL; t = t->beneath)
287 if (t->to_has_stack (t))
294 target_has_registers_1 (void)
296 struct target_ops *t;
298 for (t = current_target.beneath; t != NULL; t = t->beneath)
299 if (t->to_has_registers (t))
306 target_has_execution_1 (ptid_t the_ptid)
308 struct target_ops *t;
310 for (t = current_target.beneath; t != NULL; t = t->beneath)
311 if (t->to_has_execution (t, the_ptid))
/* Convenience wrapper: query execution for the currently selected
   inferior_ptid.  */
318 target_has_execution_current (void)
320 return target_has_execution_1 (inferior_ptid);
323 /* Complete initialization of T. This ensures that various fields in
324 T are set, if needed by the target implementation. */
327 complete_target_initialization (struct target_ops *t)
329 /* Provide default values for all "must have" methods. */
331 if (t->to_has_all_memory == NULL)
332 t->to_has_all_memory = return_zero;
334 if (t->to_has_memory == NULL)
335 t->to_has_memory = return_zero;
337 if (t->to_has_stack == NULL)
338 t->to_has_stack = return_zero;
340 if (t->to_has_registers == NULL)
341 t->to_has_registers = return_zero;
343 if (t->to_has_execution == NULL)
344 t->to_has_execution = return_zero_has_execution;
346 /* These methods can be called on an unpushed target and so require
347 a default implementation if the target might plausibly be the
348 default run target. */
349 gdb_assert (t->to_can_run == NULL || (t->to_can_async_p != NULL
350 && t->to_supports_non_stop != NULL));
/* Fill every remaining NULL method slot with a delegating stub
   (generated in target-delegates.c).  */
352 install_delegators (t);
355 /* This is used to implement the various target commands. */
/* Command sfunc: open the target whose ops were stored as the
   command's context.  The targetdebug logging lines around the call are
   partially elided in this extraction.  */
358 open_target (char *args, int from_tty, struct cmd_list_element *command)
360 struct target_ops *ops = get_cmd_context (command);
363 fprintf_unfiltered (gdb_stdlog, "-> %s->to_open (...)\n",
366 ops->to_open (args, from_tty);
369 fprintf_unfiltered (gdb_stdlog, "<- %s->to_open (%s, %d)\n",
370 ops->to_shortname, args, from_tty);
373 /* Add possible target architecture T to the list and add a new
374 command 'target T->to_shortname'. Set COMPLETER as the command's
375 completer if not NULL. */
378 add_target_with_completer (struct target_ops *t,
379 completer_ftype *completer)
381 struct cmd_list_element *c;
383 complete_target_initialization (t);
/* First-time allocation of the registry, then doubling growth when
   full.  */
387 target_struct_allocsize = DEFAULT_ALLOCSIZE;
388 target_structs = (struct target_ops **) xmalloc
389 (target_struct_allocsize * sizeof (*target_structs));
391 if (target_struct_size >= target_struct_allocsize)
393 target_struct_allocsize *= 2;
394 target_structs = (struct target_ops **)
395 xrealloc ((char *) target_structs,
396 target_struct_allocsize * sizeof (*target_structs));
398 target_structs[target_struct_size++] = t;
/* Lazily create the "target" prefix command the first time a target
   is registered.  */
400 if (targetlist == NULL)
401 add_prefix_cmd ("target", class_run, target_command, _("\
402 Connect to a target machine or process.\n\
403 The first argument is the type or protocol of the target machine.\n\
404 Remaining arguments are interpreted by the target protocol. For more\n\
405 information on the arguments for a particular protocol, type\n\
406 `help target ' followed by the protocol name."),
407 &targetlist, "target ", 0, &cmdlist);
408 c = add_cmd (t->to_shortname, no_class, NULL, t->to_doc, &targetlist);
409 set_cmd_sfunc (c, open_target);
410 set_cmd_context (c, t);
411 if (completer != NULL)
412 set_cmd_completer (c, completer);
415 /* Add a possible target architecture to the list. */
418 add_target (struct target_ops *t)
420 add_target_with_completer (t, NULL);
/* Register ALIAS as a deprecated alternate name for target T's
   command.  */
426 add_deprecated_target_alias (struct target_ops *t, char *alias)
428 struct cmd_list_element *c;
431 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
433 c = add_cmd (alias, no_class, NULL, t->to_doc, &targetlist);
434 set_cmd_sfunc (c, open_target);
435 set_cmd_context (c, t);
436 alt = xstrprintf ("target %s", t->to_shortname);
437 deprecate_cmd (c, alt);
/* NOTE(review): "¤t_target" below is mojibake — the original text
   "&current_target" was mangled through the HTML entity "&curren;".
   Several of these functions are missing lines in this extraction.  */
445 current_target.to_kill (¤t_target);
/* Invalidate the memory dcache, then dispatch "load" to the target.  */
449 target_load (const char *arg, int from_tty)
451 target_dcache_invalidate ();
452 (*current_target.to_load) (¤t_target, arg, from_tty);
456 target_terminal_inferior (void)
458 /* A background resume (``run&'') should leave GDB in control of the
459 terminal. Use target_can_async_p, not target_is_async_p, since at
460 this point the target is not async yet. However, if sync_execution
461 is not set, we know it will become async prior to resume. */
462 if (target_can_async_p () && !sync_execution)
465 /* If GDB is resuming the inferior in the foreground, install
466 inferior's terminal modes. */
467 (*current_target.to_terminal_inferior) (¤t_target);
/* Report whether some stratum provides a real (non-delegating,
   non-default) to_terminal_ours implementation.  */
473 target_supports_terminal_ours (void)
475 struct target_ops *t;
477 for (t = current_target.beneath; t != NULL; t = t->beneath)
479 if (t->to_terminal_ours != delegate_terminal_ours
480 && t->to_terminal_ours != tdefault_terminal_ours)
/* Error-reporting fragments (enclosing signatures elided): the first
   is presumably noprocess/tcomplain-style, the second noprocess.  */
490 error (_("You can't do that when your target is `%s'"),
491 current_target.to_shortname);
497 error (_("You can't do that without a process to debug."));
501 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
503 printf_unfiltered (_("No saved terminal information.\n"));
506 /* A default implementation for the to_get_ada_task_ptid target method.
508 This function builds the PTID by using both LWP and TID as part of
509 the PTID lwp and tid elements. The pid used is the pid of the
513 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
515 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
518 static enum exec_direction_kind
519 default_execution_direction (struct target_ops *self)
521 if (!target_can_execute_reverse)
523 else if (!target_can_async_p ())
/* Reverse + async with no explicit method is a bug in the target.  */
526 gdb_assert_not_reached ("\
527 to_execution_direction must be implemented for reverse async");
530 /* Go through the target stack from top to bottom, copying over zero
531 entries in current_target, then filling in still empty entries. In
532 effect, we are doing class inheritance through the pushed target
535 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
536 is currently implemented, is that it discards any knowledge of
537 which target an inherited method originally belonged to.
538 Consequently, new new target methods should instead explicitly and
539 locally search the target stack for the target that can handle the
543 update_current_target (void)
545 struct target_ops *t;
547 /* First, reset current's contents. */
/* NOTE(review): "¤t_target" is mojibake for "&current_target"
   ("&curren;" HTML entity damage in this extraction).  */
548 memset (¤t_target, 0, sizeof (current_target));
550 /* Install the delegators. */
551 install_delegators (¤t_target);
553 current_target.to_stratum = target_stack->to_stratum;
555 #define INHERIT(FIELD, TARGET) \
556 if (!current_target.FIELD) \
557 current_target.FIELD = (TARGET)->FIELD
559 /* Do not add any new INHERITs here. Instead, use the delegation
560 mechanism provided by make-target-delegates. */
561 for (t = target_stack; t; t = t->beneath)
563 INHERIT (to_shortname, t);
564 INHERIT (to_longname, t);
565 INHERIT (to_attach_no_wait, t);
566 INHERIT (to_have_steppable_watchpoint, t);
567 INHERIT (to_have_continuable_watchpoint, t);
568 INHERIT (to_has_thread_control, t);
572 /* Finally, position the target-stack beneath the squashed
573 "current_target". That way code looking for a non-inherited
574 target method can quickly and simply find it. */
575 current_target.beneath = target_stack;
/* Re-wrap methods with debug-logging stubs when targetdebug is on
   (condition line elided in this extraction).  */
578 setup_target_debug ();
581 /* Push a new target type into the stack of the existing target accessors,
582 possibly superseding some of the existing accessors.
584 Rather than allow an empty stack, we always have the dummy target at
585 the bottom stratum, so we can call the function vectors without
589 push_target (struct target_ops *t)
591 struct target_ops **cur;
593 /* Check magic number. If wrong, it probably means someone changed
594 the struct definition, but not all the places that initialize one. */
595 if (t->to_magic != OPS_MAGIC)
597 fprintf_unfiltered (gdb_stderr,
598 "Magic number of %s target struct wrong\n",
600 internal_error (__FILE__, __LINE__,
601 _("failed internal consistency check"));
604 /* Find the proper stratum to install this target in. */
605 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
607 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
611 /* If there's already targets at this stratum, remove them. */
612 /* FIXME: cagney/2003-10-15: I think this should be popping all
613 targets to CUR, and not just those at this stratum level. */
614 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
616 /* There's already something at this stratum level. Close it,
617 and un-hook it from the stack. */
618 struct target_ops *tmp = (*cur);
620 (*cur) = (*cur)->beneath;
625 /* We have removed all targets in our stratum, now add the new one. */
/* Re-flatten the squashed current_target after the stack change.  */
629 update_current_target ();
632 /* Remove a target_ops vector from the stack, wherever it may be.
633 Return how many times it was removed (0 or 1). */
636 unpush_target (struct target_ops *t)
638 struct target_ops **cur;
639 struct target_ops *tmp;
/* The dummy target must always stay at the bottom of the stack.  */
641 if (t->to_stratum == dummy_stratum)
642 internal_error (__FILE__, __LINE__,
643 _("Attempt to unpush the dummy target"));
645 /* Look for the specified target. Note that we assume that a target
646 can only occur once in the target stack. */
648 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
654 /* If we don't find target_ops, quit. Only open targets should be
659 /* Unchain the target. */
661 (*cur) = (*cur)->beneath;
664 update_current_target ();
666 /* Finally close the target. Note we do this after unchaining, so
667 any target method calls from within the target_close
668 implementation don't end up in T anymore. */
/* Pop every target strictly above ABOVE_STRATUM; an unpush failure
   here indicates internal stack corruption.  */
675 pop_all_targets_above (enum strata above_stratum)
677 while ((int) (current_target.to_stratum) > (int) above_stratum)
679 if (!unpush_target (target_stack))
681 fprintf_unfiltered (gdb_stderr,
682 "pop_all_targets couldn't find target %s\n",
683 target_stack->to_shortname);
684 internal_error (__FILE__, __LINE__,
685 _("failed internal consistency check"));
/* Pop everything except the bottom dummy target.  */
692 pop_all_targets (void)
694 pop_all_targets_above (dummy_stratum);
697 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
700 target_is_pushed (struct target_ops *t)
702 struct target_ops *cur;
704 /* Check magic number. If wrong, it probably means someone changed
705 the struct definition, but not all the places that initialize one. */
706 if (t->to_magic != OPS_MAGIC)
708 fprintf_unfiltered (gdb_stderr,
709 "Magic number of %s target struct wrong\n",
711 internal_error (__FILE__, __LINE__,
712 _("failed internal consistency check"));
/* Linear scan of the stack for T (match/return lines elided in this
   extraction).  */
715 for (cur = target_stack; cur != NULL; cur = cur->beneath)
722 /* Default implementation of to_get_thread_local_address. */
/* Always fails: this target has no TLS support.  */
725 generic_tls_error (void)
727 throw_error (TLS_GENERIC_ERROR,
728 _("Cannot find thread-local variables on this target"));
731 /* Using the objfile specified in OBJFILE, find the address for the
732 current thread's thread-local storage with offset OFFSET. */
734 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
736 volatile CORE_ADDR addr = 0;
/* NOTE(review): "¤t_target" is mojibake for "&current_target".  */
737 struct target_ops *target = ¤t_target;
739 if (gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
741 ptid_t ptid = inferior_ptid;
742 volatile struct gdb_exception ex;
744 TRY_CATCH (ex, RETURN_MASK_ALL)
748 /* Fetch the load module address for this objfile. */
749 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
752 addr = target->to_get_thread_local_address (target, ptid,
755 /* If an error occurred, print TLS related messages here. Otherwise,
756 throw the error to some higher catcher. */
759 int objfile_is_library = (objfile->flags & OBJF_SHARED);
/* Switch over ex.error, tailoring the message to whether OBJFILE is
   a shared library or the main executable (switch head elided).  */
763 case TLS_NO_LIBRARY_SUPPORT_ERROR:
764 error (_("Cannot find thread-local variables "
765 "in this thread library."));
767 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
768 if (objfile_is_library)
769 error (_("Cannot find shared library `%s' in dynamic"
770 " linker's load module list"), objfile_name (objfile));
772 error (_("Cannot find executable file `%s' in dynamic"
773 " linker's load module list"), objfile_name (objfile));
775 case TLS_NOT_ALLOCATED_YET_ERROR:
776 if (objfile_is_library)
777 error (_("The inferior has not yet allocated storage for"
778 " thread-local variables in\n"
779 "the shared library `%s'\n"
781 objfile_name (objfile), target_pid_to_str (ptid));
783 error (_("The inferior has not yet allocated storage for"
784 " thread-local variables in\n"
785 "the executable `%s'\n"
787 objfile_name (objfile), target_pid_to_str (ptid));
789 case TLS_GENERIC_ERROR:
790 if (objfile_is_library)
791 error (_("Cannot find thread-local storage for %s, "
792 "shared library %s:\n%s"),
793 target_pid_to_str (ptid),
794 objfile_name (objfile), ex.message)
796 error (_("Cannot find thread-local storage for %s, "
797 "executable file %s:\n%s"),
798 target_pid_to_str (ptid),
799 objfile_name (objfile), ex.message);
/* Unrecognized error kinds propagate to an outer catcher.  */
802 throw_exception (ex);
807 /* It wouldn't be wrong here to try a gdbarch method, too; finding
808 TLS is an ABI-specific thing. But we don't do that yet. */
810 error (_("Cannot find thread-local variables on this target"));
/* Map an enum target_xfer_status value to its symbol name via a
   stringizing CASE macro (enclosing switch lines elided).  */
816 target_xfer_status_to_string (enum target_xfer_status status)
818 #define CASE(X) case X: return #X
821 CASE(TARGET_XFER_E_IO);
822 CASE(TARGET_XFER_UNAVAILABLE);
831 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
833 /* target_read_string -- read a null terminated string, up to LEN bytes,
834 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
835 Set *STRING to a pointer to malloc'd memory containing the data; the caller
836 is responsible for freeing it. Return the number of bytes successfully
840 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
846 int buffer_allocated;
848 unsigned int nbytes_read = 0;
852 /* Small for testing. */
853 buffer_allocated = 4;
854 buffer = xmalloc (buffer_allocated);
/* Read 4-byte aligned chunks; TLEN is how many of those bytes belong
   to the caller's request.  */
859 tlen = MIN (len, 4 - (memaddr & 3));
860 offset = memaddr & 3;
862 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
865 /* The transfer request might have crossed the boundary to an
866 unallocated region of memory. Retry the transfer, requesting
870 errcode = target_read_memory (memaddr, buf, 1);
/* Grow the result buffer geometrically, preserving the write cursor
   across the xrealloc.  */
875 if (bufptr - buffer + tlen > buffer_allocated)
879 bytes = bufptr - buffer;
880 buffer_allocated *= 2;
881 buffer = xrealloc (buffer, buffer_allocated);
882 bufptr = buffer + bytes;
885 for (i = 0; i < tlen; i++)
887 *bufptr++ = buf[i + offset];
888 if (buf[i + offset] == '\000')
890 nbytes_read += i + 1;
/* Fetch the section table from TARGET via its method.  */
906 struct target_section_table *
907 target_get_section_table (struct target_ops *target)
909 return (*target->to_get_section_table) (target);
912 /* Find a section containing ADDR. */
914 struct target_section *
915 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
917 struct target_section_table *table = target_get_section_table (target)
918 struct target_section *secp;
/* Linear scan over [sections, sections_end); ADDR is inside a section
   when addr >= secp->addr && addr < secp->endaddr.  */
923 for (secp = table->sections; secp < table->sections_end; secp++)
925 if (addr >= secp->addr && addr < secp->endaddr)
931 /* Read memory from more than one valid target. A core file, for
932 instance, could have some of memory but delegate other bits to
933 the target below it. So, we must manually try all targets. */
935 static enum target_xfer_status
936 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
937 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
938 ULONGEST *xfered_len)
940 enum target_xfer_status res;
/* Loop head over the stack is elided in this extraction; each
   iteration asks one stratum for the memory.  */
944 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
945 readbuf, writebuf, memaddr, len,
947 if (res == TARGET_XFER_OK)
950 /* Stop if the target reports that the memory is not available. */
951 if (res == TARGET_XFER_UNAVAILABLE)
954 /* We want to continue past core files to executables, but not
955 past a running target's memory. */
956 if (ops->to_has_all_memory (ops))
963 /* The cache works at the raw memory level. Make sure the cache
964 gets updated with raw contents no matter what kind of memory
965 object was originally being written. Note we do write-through
966 first, so that if it fails, we don't write to the cache contents
967 that never made it to the target. */
969 && !ptid_equal (inferior_ptid, null_ptid)
970 && target_dcache_init_p ()
971 && (stack_cache_enabled_p () || code_cache_enabled_p ()))
973 DCACHE *dcache = target_dcache_get ();
975 /* Note that writing to an area of memory which wasn't present
976 in the cache doesn't cause it to be loaded in. */
977 dcache_update (dcache, res, memaddr, writebuf, *xfered_len);
983 /* Perform a partial memory transfer.
984 For docs see target.h, to_xfer_partial. */
986 static enum target_xfer_status
987 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
988 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
989 ULONGEST len, ULONGEST *xfered_len)
991 enum target_xfer_status res;
993 struct mem_region *region;
994 struct inferior *inf;
996 /* For accesses to unmapped overlay sections, read directly from
997 files. Must do this first, as MEMADDR may need adjustment. */
998 if (readbuf != NULL && overlay_debugging)
1000 struct obj_section *section = find_pc_overlay (memaddr);
1002 if (pc_in_unmapped_range (memaddr, section))
1004 struct target_section_table *table
1005 = target_get_section_table (ops);
1006 const char *section_name = section->the_bfd_section->name;
1008 memaddr = overlay_mapped_address (memaddr, section);
1009 return section_table_xfer_memory_partial (readbuf, writebuf,
1010 memaddr, len, xfered_len,
1012 table->sections_end,
1017 /* Try the executable files, if "trust-readonly-sections" is set. */
1018 if (readbuf != NULL && trust_readonly)
1020 struct target_section *secp;
1021 struct target_section_table *table;
1023 secp = target_section_by_addr (ops, memaddr);
/* Only sections flagged read-only qualify (flag test partially
   elided in this extraction).  */
1025 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1026 secp->the_bfd_section)
1029 table = target_get_section_table (ops);
1030 return section_table_xfer_memory_partial (readbuf, writebuf,
1031 memaddr, len, xfered_len,
1033 table->sections_end,
1038 /* Try GDB's internal data cache. */
1039 region = lookup_mem_region (memaddr);
1040 /* region->hi == 0 means there's no upper bound. */
1041 if (memaddr + len < region->hi || region->hi == 0)
1044 reg_len = region->hi - memaddr;
/* Enforce the memory region's access mode before touching the
   target.  */
1046 switch (region->attrib.mode)
1049 if (writebuf != NULL)
1050 return TARGET_XFER_E_IO;
1054 if (readbuf != NULL)
1055 return TARGET_XFER_E_IO;
1059 /* We only support writing to flash during "load" for now. */
1060 if (writebuf != NULL)
1061 error (_("Writing to flash memory forbidden in this context"));
1065 return TARGET_XFER_E_IO;
1068 if (!ptid_equal (inferior_ptid, null_ptid))
1069 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1075 /* The dcache reads whole cache lines; that doesn't play well
1076 with reading from a trace buffer, because reading outside of
1077 the collected memory range fails. */
1078 && get_traceframe_number () == -1
1079 && (region->attrib.cache
1080 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1081 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1083 DCACHE *dcache = target_dcache_get_or_init ();
1085 return dcache_read_memory_partial (ops, dcache, memaddr, readbuf,
1086 reg_len, xfered_len);
1089 /* If none of those methods found the memory we wanted, fall back
1090 to a target partial transfer. Normally a single call to
1091 to_xfer_partial is enough; if it doesn't recognize an object
1092 it will call the to_xfer_partial of the next target down.
1093 But for memory this won't do. Memory is the only target
1094 object which can be read from more than one valid target.
1095 A core file, for instance, could have some of memory but
1096 delegate other bits to the target below it. So, we must
1097 manually try all targets. */
1099 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1102 /* If we still haven't got anything, return the last error. We
1107 /* Perform a partial memory transfer. For docs see target.h,
1110 static enum target_xfer_status
1111 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1112 gdb_byte *readbuf, const gdb_byte *writebuf,
1113 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1115 enum target_xfer_status res;
1117 /* Zero length requests are ok and require no work. */
1119 return TARGET_XFER_EOF;
1121 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1122 breakpoint insns, thus hiding out from higher layers whether
1123 there are software breakpoints inserted in the code stream. */
1124 if (readbuf != NULL)
1126 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1129 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1130 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, *xfered_len);
/* Write path: operate on a heap copy so breakpoint shadows never
   clobber the caller's buffer.  */
1135 struct cleanup *old_chain;
1137 /* A large write request is likely to be partially satisfied
1138 by memory_xfer_partial_1. We will continually malloc
1139 and free a copy of the entire write request for breakpoint
1140 shadow handling even though we only end up writing a small
1141 subset of it. Cap writes to 4KB to mitigate this. */
1142 len = min (4096, len);
1144 buf = xmalloc (len);
1145 old_chain = make_cleanup (xfree, buf);
1146 memcpy (buf, writebuf, len);
1148 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1149 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1152 do_cleanups (old_chain);
/* Cleanup callback: restore the saved show_memory_breakpoints flag
   smuggled through the void* argument.  */
1159 restore_show_memory_breakpoints (void *arg)
1161 show_memory_breakpoints = (uintptr_t) arg;
/* Temporarily set show_memory_breakpoints to SHOW; the returned
   cleanup restores the previous value.  */
1165 make_show_memory_breakpoints_cleanup (int show)
1167 int current = show_memory_breakpoints;
1169 show_memory_breakpoints = show;
1170 return make_cleanup (restore_show_memory_breakpoints,
1171 (void *) (uintptr_t) current);
1174 /* For docs see target.h, to_xfer_partial. */
1176 enum target_xfer_status
1177 target_xfer_partial (struct target_ops *ops,
1178 enum target_object object, const char *annex,
1179 gdb_byte *readbuf, const gdb_byte *writebuf,
1180 ULONGEST offset, ULONGEST len,
1181 ULONGEST *xfered_len)
1183 enum target_xfer_status retval;
1185 gdb_assert (ops->to_xfer_partial != NULL);
1187 /* Transfer is done when LEN is zero. */
1189 return TARGET_XFER_EOF;
/* Honor the global write-permission switch before touching memory.  */
1191 if (writebuf && !may_write_memory)
1192 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1193 core_addr_to_string_nz (offset), plongest (len));
1197 /* If this is a memory transfer, let the memory-specific code
1198 have a look at it instead. Memory transfers are more
1200 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1201 || object == TARGET_OBJECT_CODE_MEMORY)
1202 retval = memory_xfer_partial (ops, object, readbuf,
1203 writebuf, offset, len, xfered_len);
1204 else if (object == TARGET_OBJECT_RAW_MEMORY)
1206 /* Request the normal memory object from other layers. */
1207 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1211 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1212 writebuf, offset, len, xfered_len);
/* Optional targetdebug trace of the call and, at higher verbosity,
   a hex dump of the transferred bytes (guard lines elided).  */
1216 const unsigned char *myaddr = NULL;
1218 fprintf_unfiltered (gdb_stdlog,
1219 "%s:target_xfer_partial "
1220 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1223 (annex ? annex : "(null)"),
1224 host_address_to_string (readbuf),
1225 host_address_to_string (writebuf),
1226 core_addr_to_string_nz (offset),
1227 pulongest (len), retval,
1228 pulongest (*xfered_len));
1234 if (retval == TARGET_XFER_OK && myaddr != NULL)
1238 fputs_unfiltered (", bytes =", gdb_stdlog);
1239 for (i = 0; i < *xfered_len; i++)
1241 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1243 if (targetdebug < 2 && i > 0)
1245 fprintf_unfiltered (gdb_stdlog, " ...");
1248 fprintf_unfiltered (gdb_stdlog, "\n");
1251 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1255 fputc_unfiltered ('\n', gdb_stdlog);
1258 /* Check implementations of to_xfer_partial update *XFERED_LEN
1259 properly. Do assertion after printing debug messages, so that we
1260 can find more clues on assertion failure from debugging messages. */
1261 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_UNAVAILABLE)
1262 gdb_assert (*xfered_len > 0);
1267 /* Read LEN bytes of target memory at address MEMADDR, placing the
1268 results in GDB's memory at MYADDR. Returns either 0 for success or
1269 TARGET_XFER_E_IO if any error occurs.
1271 If an error occurs, no guarantee is made about the contents of the data at
1272 MYADDR. In particular, the caller should not depend upon partial reads
1273 filling the buffer with good data. There is no way for the caller to know
1274 how much good data might have been transfered anyway. Callers that can
1275 deal with partial reads should call target_read (which will retry until
1276 it makes no progress, and then return how much was transferred). */
1279 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1281 /* Dispatch to the topmost target, not the flattened current_target.
1282 Memory accesses check target->to_has_(all_)memory, and the
1283 flattened target doesn't inherit those. */
1284 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1285 myaddr, memaddr, len) == len)
1288 return TARGET_XFER_E_IO;
1291 /* Like target_read_memory, but specify explicitly that this is a read
1292 from the target's raw memory. That is, this read bypasses the
1293 dcache, breakpoint shadowing, etc. */
1296 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1298 /* See comment in target_read_memory about why the request starts at
1299 current_target.beneath. */
1300 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1301 myaddr, memaddr, len) == len)
1304 return TARGET_XFER_E_IO;
1307 /* Like target_read_memory, but specify explicitly that this is a read from
1308 the target's stack. This may trigger different cache behavior. */
1311 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1313 /* See comment in target_read_memory about why the request starts at
1314 current_target.beneath. */
1315 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1316 myaddr, memaddr, len) == len)
1319 return TARGET_XFER_E_IO;
1322 /* Like target_read_memory, but specify explicitly that this is a read from
1323 the target's code. This may trigger different cache behavior. */
1326 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1328 /* See comment in target_read_memory about why the request starts at
1329 current_target.beneath. */
1330 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1331 myaddr, memaddr, len) == len)
1334 return TARGET_XFER_E_IO;
1337 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1338 Returns either 0 for success or TARGET_XFER_E_IO if any
1339 error occurs. If an error occurs, no guarantee is made about how
1340 much data got written. Callers that can deal with partial writes
1341 should call target_write. */
1344 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1346 /* See comment in target_read_memory about why the request starts at
1347 current_target.beneath. */
1348 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1349 myaddr, memaddr, len) == len)
1352 return TARGET_XFER_E_IO;
1355 /* Write LEN bytes from MYADDR to target raw memory at address
1356 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1357 if any error occurs. If an error occurs, no guarantee is made
1358 about how much data got written. Callers that can deal with
1359 partial writes should call target_write. */
1362 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1364 /* See comment in target_read_memory about why the request starts at
1365 current_target.beneath. */
1366 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1367 myaddr, memaddr, len) == len)
1370 return TARGET_XFER_E_IO;
1373 /* Fetch the target's memory map. */
1376 target_memory_map (void)
1378 VEC(mem_region_s) *result;
1379 struct mem_region *last_one, *this_one;
1381 struct target_ops *t;
1383 result = current_target.to_memory_map (¤t_target);
1387 qsort (VEC_address (mem_region_s, result),
1388 VEC_length (mem_region_s, result),
1389 sizeof (struct mem_region), mem_region_cmp);
1391 /* Check that regions do not overlap. Simultaneously assign
1392 a numbering for the "mem" commands to use to refer to
1395 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1397 this_one->number = ix;
1399 if (last_one && last_one->hi > this_one->lo)
1401 warning (_("Overlapping regions in memory map: ignoring"));
1402 VEC_free (mem_region_s, result);
1405 last_one = this_one;
1412 target_flash_erase (ULONGEST address, LONGEST length)
1414 current_target.to_flash_erase (¤t_target, address, length);
1418 target_flash_done (void)
1420 current_target.to_flash_done (¤t_target);
1424 show_trust_readonly (struct ui_file *file, int from_tty,
1425 struct cmd_list_element *c, const char *value)
1427 fprintf_filtered (file,
1428 _("Mode for reading from readonly sections is %s.\n"),
1432 /* Target vector read/write partial wrapper functions. */
1434 static enum target_xfer_status
1435 target_read_partial (struct target_ops *ops,
1436 enum target_object object,
1437 const char *annex, gdb_byte *buf,
1438 ULONGEST offset, ULONGEST len,
1439 ULONGEST *xfered_len)
1441 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
1445 static enum target_xfer_status
1446 target_write_partial (struct target_ops *ops,
1447 enum target_object object,
1448 const char *annex, const gdb_byte *buf,
1449 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
1451 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
1455 /* Wrappers to perform the full transfer. */
1457 /* For docs on target_read see target.h. */
1460 target_read (struct target_ops *ops,
1461 enum target_object object,
1462 const char *annex, gdb_byte *buf,
1463 ULONGEST offset, LONGEST len)
1467 while (xfered < len)
1469 ULONGEST xfered_len;
1470 enum target_xfer_status status;
1472 status = target_read_partial (ops, object, annex,
1473 (gdb_byte *) buf + xfered,
1474 offset + xfered, len - xfered,
1477 /* Call an observer, notifying them of the xfer progress? */
1478 if (status == TARGET_XFER_EOF)
1480 else if (status == TARGET_XFER_OK)
1482 xfered += xfered_len;
1492 /* Assuming that the entire [begin, end) range of memory cannot be
1493 read, try to read whatever subrange is possible to read.
1495 The function returns, in RESULT, either zero or one memory block.
1496 If there's a readable subrange at the beginning, it is completely
1497 read and returned. Any further readable subrange will not be read.
1498 Otherwise, if there's a readable subrange at the end, it will be
1499 completely read and returned. Any readable subranges before it
1500 (obviously, not starting at the beginning), will be ignored. In
1501 other cases -- either no readable subrange, or readable subrange(s)
1502 that is neither at the beginning, or end, nothing is returned.
1504 The purpose of this function is to handle a read across a boundary
1505 of accessible memory in a case when memory map is not available.
1506 The above restrictions are fine for this case, but will give
1507 incorrect results if the memory is 'patchy'. However, supporting
1508 'patchy' memory would require trying to read every single byte,
1509 and it seems unacceptable solution. Explicit memory map is
1510 recommended for this case -- and target_read_memory_robust will
1511 take care of reading multiple ranges then. */
1514 read_whatever_is_readable (struct target_ops *ops,
1515 ULONGEST begin, ULONGEST end,
1516 VEC(memory_read_result_s) **result)
1518 gdb_byte *buf = xmalloc (end - begin);
1519 ULONGEST current_begin = begin;
1520 ULONGEST current_end = end;
1522 memory_read_result_s r;
1523 ULONGEST xfered_len;
1525 /* If we previously failed to read 1 byte, nothing can be done here. */
1526 if (end - begin <= 1)
1532 /* Check that either first or the last byte is readable, and give up
1533 if not. This heuristic is meant to permit reading accessible memory
1534 at the boundary of accessible region. */
1535 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1536 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
1541 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1542 buf + (end-begin) - 1, end - 1, 1,
1543 &xfered_len) == TARGET_XFER_OK)
1554 /* Loop invariant is that the [current_begin, current_end) was previously
1555 found to be not readable as a whole.
1557 Note loop condition -- if the range has 1 byte, we can't divide the range
1558 so there's no point trying further. */
1559 while (current_end - current_begin > 1)
1561 ULONGEST first_half_begin, first_half_end;
1562 ULONGEST second_half_begin, second_half_end;
1564 ULONGEST middle = current_begin + (current_end - current_begin)/2;
1568 first_half_begin = current_begin;
1569 first_half_end = middle;
1570 second_half_begin = middle;
1571 second_half_end = current_end;
1575 first_half_begin = middle;
1576 first_half_end = current_end;
1577 second_half_begin = current_begin;
1578 second_half_end = middle;
1581 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1582 buf + (first_half_begin - begin),
1584 first_half_end - first_half_begin);
1586 if (xfer == first_half_end - first_half_begin)
1588 /* This half reads up fine. So, the error must be in the
1590 current_begin = second_half_begin;
1591 current_end = second_half_end;
1595 /* This half is not readable. Because we've tried one byte, we
1596 know some part of this half if actually redable. Go to the next
1597 iteration to divide again and try to read.
1599 We don't handle the other half, because this function only tries
1600 to read a single readable subrange. */
1601 current_begin = first_half_begin;
1602 current_end = first_half_end;
1608 /* The [begin, current_begin) range has been read. */
1610 r.end = current_begin;
1615 /* The [current_end, end) range has been read. */
1616 LONGEST rlen = end - current_end;
1618 r.data = xmalloc (rlen);
1619 memcpy (r.data, buf + current_end - begin, rlen);
1620 r.begin = current_end;
1624 VEC_safe_push(memory_read_result_s, (*result), &r);
1628 free_memory_read_result_vector (void *x)
1630 VEC(memory_read_result_s) *v = x;
1631 memory_read_result_s *current;
1634 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
1636 xfree (current->data);
1638 VEC_free (memory_read_result_s, v);
1641 VEC(memory_read_result_s) *
1642 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
1644 VEC(memory_read_result_s) *result = 0;
1647 while (xfered < len)
1649 struct mem_region *region = lookup_mem_region (offset + xfered);
1652 /* If there is no explicit region, a fake one should be created. */
1653 gdb_assert (region);
1655 if (region->hi == 0)
1656 rlen = len - xfered;
1658 rlen = region->hi - offset;
1660 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
1662 /* Cannot read this region. Note that we can end up here only
1663 if the region is explicitly marked inaccessible, or
1664 'inaccessible-by-default' is in effect. */
1669 LONGEST to_read = min (len - xfered, rlen);
1670 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
1672 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
1673 (gdb_byte *) buffer,
1674 offset + xfered, to_read);
1675 /* Call an observer, notifying them of the xfer progress? */
1678 /* Got an error reading full chunk. See if maybe we can read
1681 read_whatever_is_readable (ops, offset + xfered,
1682 offset + xfered + to_read, &result);
1687 struct memory_read_result r;
1689 r.begin = offset + xfered;
1690 r.end = r.begin + xfer;
1691 VEC_safe_push (memory_read_result_s, result, &r);
1701 /* An alternative to target_write with progress callbacks. */
1704 target_write_with_progress (struct target_ops *ops,
1705 enum target_object object,
1706 const char *annex, const gdb_byte *buf,
1707 ULONGEST offset, LONGEST len,
1708 void (*progress) (ULONGEST, void *), void *baton)
1712 /* Give the progress callback a chance to set up. */
1714 (*progress) (0, baton);
1716 while (xfered < len)
1718 ULONGEST xfered_len;
1719 enum target_xfer_status status;
1721 status = target_write_partial (ops, object, annex,
1722 (gdb_byte *) buf + xfered,
1723 offset + xfered, len - xfered,
1726 if (status != TARGET_XFER_OK)
1727 return status == TARGET_XFER_EOF ? xfered : -1;
1730 (*progress) (xfered_len, baton);
1732 xfered += xfered_len;
1738 /* For docs on target_write see target.h. */
1741 target_write (struct target_ops *ops,
1742 enum target_object object,
1743 const char *annex, const gdb_byte *buf,
1744 ULONGEST offset, LONGEST len)
1746 return target_write_with_progress (ops, object, annex, buf, offset, len,
1750 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1751 the size of the transferred data. PADDING additional bytes are
1752 available in *BUF_P. This is a helper function for
1753 target_read_alloc; see the declaration of that function for more
1757 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
1758 const char *annex, gdb_byte **buf_p, int padding)
1760 size_t buf_alloc, buf_pos;
1763 /* This function does not have a length parameter; it reads the
1764 entire OBJECT). Also, it doesn't support objects fetched partly
1765 from one target and partly from another (in a different stratum,
1766 e.g. a core file and an executable). Both reasons make it
1767 unsuitable for reading memory. */
1768 gdb_assert (object != TARGET_OBJECT_MEMORY);
1770 /* Start by reading up to 4K at a time. The target will throttle
1771 this number down if necessary. */
1773 buf = xmalloc (buf_alloc);
1777 ULONGEST xfered_len;
1778 enum target_xfer_status status;
1780 status = target_read_partial (ops, object, annex, &buf[buf_pos],
1781 buf_pos, buf_alloc - buf_pos - padding,
1784 if (status == TARGET_XFER_EOF)
1786 /* Read all there was. */
1793 else if (status != TARGET_XFER_OK)
1795 /* An error occurred. */
1797 return TARGET_XFER_E_IO;
1800 buf_pos += xfered_len;
1802 /* If the buffer is filling up, expand it. */
1803 if (buf_alloc < buf_pos * 2)
1806 buf = xrealloc (buf, buf_alloc);
1813 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
1814 the size of the transferred data. See the declaration in "target.h"
1815 function for more information about the return value. */
1818 target_read_alloc (struct target_ops *ops, enum target_object object,
1819 const char *annex, gdb_byte **buf_p)
1821 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
1824 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
1825 returned as a string, allocated using xmalloc. If an error occurs
1826 or the transfer is unsupported, NULL is returned. Empty objects
1827 are returned as allocated but empty strings. A warning is issued
1828 if the result contains any embedded NUL bytes. */
1831 target_read_stralloc (struct target_ops *ops, enum target_object object,
1836 LONGEST i, transferred;
1838 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
1839 bufstr = (char *) buffer;
1841 if (transferred < 0)
1844 if (transferred == 0)
1845 return xstrdup ("");
1847 bufstr[transferred] = 0;
1849 /* Check for embedded NUL bytes; but allow trailing NULs. */
1850 for (i = strlen (bufstr); i < transferred; i++)
1853 warning (_("target object %d, annex %s, "
1854 "contained unexpected null characters"),
1855 (int) object, annex ? annex : "(none)");
1862 /* Memory transfer methods. */
1865 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
1868 /* This method is used to read from an alternate, non-current
1869 target. This read must bypass the overlay support (as symbols
1870 don't match this target), and GDB's internal cache (wrong cache
1871 for this target). */
1872 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
1874 memory_error (TARGET_XFER_E_IO, addr);
1878 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
1879 int len, enum bfd_endian byte_order)
1881 gdb_byte buf[sizeof (ULONGEST)];
1883 gdb_assert (len <= sizeof (buf));
1884 get_target_memory (ops, addr, buf, len);
1885 return extract_unsigned_integer (buf, len, byte_order);
1891 target_insert_breakpoint (struct gdbarch *gdbarch,
1892 struct bp_target_info *bp_tgt)
1894 if (!may_insert_breakpoints)
1896 warning (_("May not insert breakpoints"));
1900 return current_target.to_insert_breakpoint (¤t_target,
1907 target_remove_breakpoint (struct gdbarch *gdbarch,
1908 struct bp_target_info *bp_tgt)
1910 /* This is kind of a weird case to handle, but the permission might
1911 have been changed after breakpoints were inserted - in which case
1912 we should just take the user literally and assume that any
1913 breakpoints should be left in place. */
1914 if (!may_insert_breakpoints)
1916 warning (_("May not remove breakpoints"));
1920 return current_target.to_remove_breakpoint (¤t_target,
1925 target_info (char *args, int from_tty)
1927 struct target_ops *t;
1928 int has_all_mem = 0;
1930 if (symfile_objfile != NULL)
1931 printf_unfiltered (_("Symbols from \"%s\".\n"),
1932 objfile_name (symfile_objfile));
1934 for (t = target_stack; t != NULL; t = t->beneath)
1936 if (!(*t->to_has_memory) (t))
1939 if ((int) (t->to_stratum) <= (int) dummy_stratum)
1942 printf_unfiltered (_("\tWhile running this, "
1943 "GDB does not access memory from...\n"));
1944 printf_unfiltered ("%s:\n", t->to_longname);
1945 (t->to_files_info) (t);
1946 has_all_mem = (*t->to_has_all_memory) (t);
1950 /* This function is called before any new inferior is created, e.g.
1951 by running a program, attaching, or connecting to a target.
1952 It cleans up any state from previous invocations which might
1953 change between runs. This is a subset of what target_preopen
1954 resets (things which might change between targets). */
1957 target_pre_inferior (int from_tty)
1959 /* Clear out solib state. Otherwise the solib state of the previous
1960 inferior might have survived and is entirely wrong for the new
1961 target. This has been observed on GNU/Linux using glibc 2.3. How
1973 Cannot access memory at address 0xdeadbeef
1976 /* In some OSs, the shared library list is the same/global/shared
1977 across inferiors. If code is shared between processes, so are
1978 memory regions and features. */
1979 if (!gdbarch_has_global_solist (target_gdbarch ()))
1981 no_shared_libraries (NULL, from_tty);
1983 invalidate_target_mem_regions ();
1985 target_clear_description ();
1988 agent_capability_invalidate ();
1991 /* Callback for iterate_over_inferiors. Gets rid of the given
1995 dispose_inferior (struct inferior *inf, void *args)
1997 struct thread_info *thread;
1999 thread = any_thread_of_process (inf->pid);
2002 switch_to_thread (thread->ptid);
2004 /* Core inferiors actually should be detached, not killed. */
2005 if (target_has_execution)
2008 target_detach (NULL, 0);
2014 /* This is to be called by the open routine before it does
2018 target_preopen (int from_tty)
2022 if (have_inferiors ())
2025 || !have_live_inferiors ()
2026 || query (_("A program is being debugged already. Kill it? ")))
2027 iterate_over_inferiors (dispose_inferior, NULL);
2029 error (_("Program not killed."));
2032 /* Calling target_kill may remove the target from the stack. But if
2033 it doesn't (which seems like a win for UDI), remove it now. */
2034 /* Leave the exec target, though. The user may be switching from a
2035 live process to a core of the same program. */
2036 pop_all_targets_above (file_stratum);
2038 target_pre_inferior (from_tty);
2041 /* Detach a target after doing deferred register stores. */
2044 target_detach (const char *args, int from_tty)
2046 struct target_ops* t;
2048 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2049 /* Don't remove global breakpoints here. They're removed on
2050 disconnection from the target. */
2053 /* If we're in breakpoints-always-inserted mode, have to remove
2054 them before detaching. */
2055 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2057 prepare_for_detach ();
2059 current_target.to_detach (¤t_target, args, from_tty);
2063 target_disconnect (const char *args, int from_tty)
2065 /* If we're in breakpoints-always-inserted mode or if breakpoints
2066 are global across processes, we have to remove them before
2068 remove_breakpoints ();
2070 current_target.to_disconnect (¤t_target, args, from_tty);
2074 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2076 return (current_target.to_wait) (¤t_target, ptid, status, options);
2080 target_pid_to_str (ptid_t ptid)
2082 return (*current_target.to_pid_to_str) (¤t_target, ptid);
2086 target_thread_name (struct thread_info *info)
2088 return current_target.to_thread_name (¤t_target, info);
2092 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2094 struct target_ops *t;
2096 target_dcache_invalidate ();
2098 current_target.to_resume (¤t_target, ptid, step, signal);
2100 registers_changed_ptid (ptid);
2101 /* We only set the internal executing state here. The user/frontend
2102 running state is set at a higher level. */
2103 set_executing (ptid, 1);
2104 clear_inline_frame_state (ptid);
2108 target_pass_signals (int numsigs, unsigned char *pass_signals)
2110 (*current_target.to_pass_signals) (¤t_target, numsigs, pass_signals);
2114 target_program_signals (int numsigs, unsigned char *program_signals)
2116 (*current_target.to_program_signals) (¤t_target,
2117 numsigs, program_signals);
2121 default_follow_fork (struct target_ops *self, int follow_child,
2124 /* Some target returned a fork event, but did not know how to follow it. */
2125 internal_error (__FILE__, __LINE__,
2126 _("could not find a target to follow fork"));
2129 /* Look through the list of possible targets for a target that can
2133 target_follow_fork (int follow_child, int detach_fork)
2135 return current_target.to_follow_fork (¤t_target,
2136 follow_child, detach_fork);
2140 default_mourn_inferior (struct target_ops *self)
2142 internal_error (__FILE__, __LINE__,
2143 _("could not find a target to follow mourn inferior"));
2147 target_mourn_inferior (void)
2149 current_target.to_mourn_inferior (¤t_target);
2151 /* We no longer need to keep handles on any of the object files.
2152 Make sure to release them to avoid unnecessarily locking any
2153 of them while we're not actually debugging. */
2154 bfd_cache_close_all ();
2157 /* Look for a target which can describe architectural features, starting
2158 from TARGET. If we find one, return its description. */
2160 const struct target_desc *
2161 target_read_description (struct target_ops *target)
2163 return target->to_read_description (target);
2166 /* This implements a basic search of memory, reading target memory and
2167 performing the search here (as opposed to performing the search in on the
2168 target side with, for example, gdbserver). */
2171 simple_search_memory (struct target_ops *ops,
2172 CORE_ADDR start_addr, ULONGEST search_space_len,
2173 const gdb_byte *pattern, ULONGEST pattern_len,
2174 CORE_ADDR *found_addrp)
2176 /* NOTE: also defined in find.c testcase. */
2177 #define SEARCH_CHUNK_SIZE 16000
2178 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2179 /* Buffer to hold memory contents for searching. */
2180 gdb_byte *search_buf;
2181 unsigned search_buf_size;
2182 struct cleanup *old_cleanups;
2184 search_buf_size = chunk_size + pattern_len - 1;
2186 /* No point in trying to allocate a buffer larger than the search space. */
2187 if (search_space_len < search_buf_size)
2188 search_buf_size = search_space_len;
2190 search_buf = malloc (search_buf_size);
2191 if (search_buf == NULL)
2192 error (_("Unable to allocate memory to perform the search."));
2193 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2195 /* Prime the search buffer. */
2197 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2198 search_buf, start_addr, search_buf_size) != search_buf_size)
2200 warning (_("Unable to access %s bytes of target "
2201 "memory at %s, halting search."),
2202 pulongest (search_buf_size), hex_string (start_addr));
2203 do_cleanups (old_cleanups);
2207 /* Perform the search.
2209 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2210 When we've scanned N bytes we copy the trailing bytes to the start and
2211 read in another N bytes. */
2213 while (search_space_len >= pattern_len)
2215 gdb_byte *found_ptr;
2216 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2218 found_ptr = memmem (search_buf, nr_search_bytes,
2219 pattern, pattern_len);
2221 if (found_ptr != NULL)
2223 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2225 *found_addrp = found_addr;
2226 do_cleanups (old_cleanups);
2230 /* Not found in this chunk, skip to next chunk. */
2232 /* Don't let search_space_len wrap here, it's unsigned. */
2233 if (search_space_len >= chunk_size)
2234 search_space_len -= chunk_size;
2236 search_space_len = 0;
2238 if (search_space_len >= pattern_len)
2240 unsigned keep_len = search_buf_size - chunk_size;
2241 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2244 /* Copy the trailing part of the previous iteration to the front
2245 of the buffer for the next iteration. */
2246 gdb_assert (keep_len == pattern_len - 1);
2247 memcpy (search_buf, search_buf + chunk_size, keep_len);
2249 nr_to_read = min (search_space_len - keep_len, chunk_size);
2251 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2252 search_buf + keep_len, read_addr,
2253 nr_to_read) != nr_to_read)
2255 warning (_("Unable to access %s bytes of target "
2256 "memory at %s, halting search."),
2257 plongest (nr_to_read),
2258 hex_string (read_addr));
2259 do_cleanups (old_cleanups);
2263 start_addr += chunk_size;
2269 do_cleanups (old_cleanups);
2273 /* Default implementation of memory-searching. */
2276 default_search_memory (struct target_ops *self,
2277 CORE_ADDR start_addr, ULONGEST search_space_len,
2278 const gdb_byte *pattern, ULONGEST pattern_len,
2279 CORE_ADDR *found_addrp)
2281 /* Start over from the top of the target stack. */
2282 return simple_search_memory (current_target.beneath,
2283 start_addr, search_space_len,
2284 pattern, pattern_len, found_addrp);
2287 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2288 sequence of bytes in PATTERN with length PATTERN_LEN.
2290 The result is 1 if found, 0 if not found, and -1 if there was an error
2291 requiring halting of the search (e.g. memory read error).
2292 If the pattern is found the address is recorded in FOUND_ADDRP. */
2295 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2296 const gdb_byte *pattern, ULONGEST pattern_len,
2297 CORE_ADDR *found_addrp)
2299 return current_target.to_search_memory (¤t_target, start_addr,
2301 pattern, pattern_len, found_addrp);
2304 /* Look through the currently pushed targets. If none of them will
2305 be able to restart the currently running process, issue an error
2309 target_require_runnable (void)
2311 struct target_ops *t;
2313 for (t = target_stack; t != NULL; t = t->beneath)
2315 /* If this target knows how to create a new program, then
2316 assume we will still be able to after killing the current
2317 one. Either killing and mourning will not pop T, or else
2318 find_default_run_target will find it again. */
2319 if (t->to_create_inferior != NULL)
2322 /* Do not worry about targets at certain strata that can not
2323 create inferiors. Assume they will be pushed again if
2324 necessary, and continue to the process_stratum. */
2325 if (t->to_stratum == thread_stratum
2326 || t->to_stratum == record_stratum
2327 || t->to_stratum == arch_stratum)
2330 error (_("The \"%s\" target does not support \"run\". "
2331 "Try \"help target\" or \"continue\"."),
2335 /* This function is only called if the target is running. In that
2336 case there should have been a process_stratum target and it
2337 should either know how to create inferiors, or not... */
2338 internal_error (__FILE__, __LINE__, _("No targets found"));
2341 /* Whether GDB is allowed to fall back to the default run target for
2342 "run", "attach", etc. when no target is connected yet. */
2343 static int auto_connect_native_target = 1;
2346 show_auto_connect_native_target (struct ui_file *file, int from_tty,
2347 struct cmd_list_element *c, const char *value)
2349 fprintf_filtered (file,
2350 _("Whether GDB may automatically connect to the "
2351 "native target is %s.\n"),
2355 /* Look through the list of possible targets for a target that can
2356 execute a run or attach command without any other data. This is
2357 used to locate the default process stratum.
2359 If DO_MESG is not NULL, the result is always valid (error() is
2360 called for errors); else, return NULL on error. */
2362 static struct target_ops *
2363 find_default_run_target (char *do_mesg)
2365 struct target_ops *runable = NULL;
2367 if (auto_connect_native_target)
2369 struct target_ops **t;
2372 for (t = target_structs; t < target_structs + target_struct_size;
2375 if ((*t)->to_can_run != delegate_can_run && target_can_run (*t))
2386 if (runable == NULL)
2389 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
2400 find_attach_target (void)
2402 struct target_ops *t;
2404 /* If a target on the current stack can attach, use it. */
2405 for (t = current_target.beneath; t != NULL; t = t->beneath)
2407 if (t->to_attach != NULL)
2411 /* Otherwise, use the default run target for attaching. */
2413 t = find_default_run_target ("attach");
2421 find_run_target (void)
2423 struct target_ops *t;
2425 /* If a target on the current stack can attach, use it. */
2426 for (t = current_target.beneath; t != NULL; t = t->beneath)
2428 if (t->to_create_inferior != NULL)
2432 /* Otherwise, use the default run target. */
2434 t = find_default_run_target ("run");
2439 /* Implement the "info proc" command. */
2442 target_info_proc (const char *args, enum info_proc_what what)
2444 struct target_ops *t;
2446 /* If we're already connected to something that can get us OS
2447 related data, use it. Otherwise, try using the native
2449 if (current_target.to_stratum >= process_stratum)
2450 t = current_target.beneath;
2452 t = find_default_run_target (NULL);
2454 for (; t != NULL; t = t->beneath)
2456 if (t->to_info_proc != NULL)
2458 t->to_info_proc (t, args, what);
2461 fprintf_unfiltered (gdb_stdlog,
2462 "target_info_proc (\"%s\", %d)\n", args, what);
2472 find_default_supports_disable_randomization (struct target_ops *self)
2474 struct target_ops *t;
2476 t = find_default_run_target (NULL);
2477 if (t && t->to_supports_disable_randomization)
2478 return (t->to_supports_disable_randomization) (t);
2483 target_supports_disable_randomization (void)
2485 struct target_ops *t;
2487 for (t = ¤t_target; t != NULL; t = t->beneath)
2488 if (t->to_supports_disable_randomization)
2489 return t->to_supports_disable_randomization (t);
2495 target_get_osdata (const char *type)
2497 struct target_ops *t;
2499 /* If we're already connected to something that can get us OS
2500 related data, use it. Otherwise, try using the native
2502 if (current_target.to_stratum >= process_stratum)
2503 t = current_target.beneath;
2505 t = find_default_run_target ("get OS data");
2510 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
2513 static struct address_space *
2514 default_thread_address_space (struct target_ops *self, ptid_t ptid)
2516 struct inferior *inf;
2518 /* Fall-back to the "main" address space of the inferior. */
2519 inf = find_inferior_pid (ptid_get_pid (ptid));
2521 if (inf == NULL || inf->aspace == NULL)
2522 internal_error (__FILE__, __LINE__,
2523 _("Can't determine the current "
2524 "address space of thread %s\n"),
2525 target_pid_to_str (ptid));
2530 /* Determine the current address space of thread PTID. */
2532 struct address_space *
2533 target_thread_address_space (ptid_t ptid)
2535 struct address_space *aspace;
2537 aspace = current_target.to_thread_address_space (¤t_target, ptid);
2538 gdb_assert (aspace != NULL);
2544 /* Target file operations. */
2546 static struct target_ops *
2547 default_fileio_target (void)
2549 /* If we're already connected to something that can perform
2550 file I/O, use it. Otherwise, try using the native target. */
2551 if (current_target.to_stratum >= process_stratum)
2552 return current_target.beneath;
2554 return find_default_run_target ("file I/O");
2557 /* Open FILENAME on the target, using FLAGS and MODE. Return a
2558 target file descriptor, or -1 if an error occurs (and set
2561 target_fileio_open (const char *filename, int flags, int mode,
2564 struct target_ops *t;
2566 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2568 if (t->to_fileio_open != NULL)
2570 int fd = t->to_fileio_open (t, filename, flags, mode, target_errno);
2573 fprintf_unfiltered (gdb_stdlog,
2574 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
2575 filename, flags, mode,
2576 fd, fd != -1 ? 0 : *target_errno);
2581 *target_errno = FILEIO_ENOSYS;
2585 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
2586 Return the number of bytes written, or -1 if an error occurs
2587 (and set *TARGET_ERRNO). */
2589 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
2590 ULONGEST offset, int *target_errno)
2592 struct target_ops *t;
2594 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2596 if (t->to_fileio_pwrite != NULL)
2598 int ret = t->to_fileio_pwrite (t, fd, write_buf, len, offset,
2602 fprintf_unfiltered (gdb_stdlog,
2603 "target_fileio_pwrite (%d,...,%d,%s) "
2605 fd, len, pulongest (offset),
2606 ret, ret != -1 ? 0 : *target_errno);
2611 *target_errno = FILEIO_ENOSYS;
2615 /* Read up to LEN bytes FD on the target into READ_BUF.
2616 Return the number of bytes read, or -1 if an error occurs
2617 (and set *TARGET_ERRNO). */
2619 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
2620 ULONGEST offset, int *target_errno)
2622 struct target_ops *t;
2624 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2626 if (t->to_fileio_pread != NULL)
2628 int ret = t->to_fileio_pread (t, fd, read_buf, len, offset,
2632 fprintf_unfiltered (gdb_stdlog,
2633 "target_fileio_pread (%d,...,%d,%s) "
2635 fd, len, pulongest (offset),
2636 ret, ret != -1 ? 0 : *target_errno);
2641 *target_errno = FILEIO_ENOSYS;
2645 /* Close FD on the target. Return 0, or -1 if an error occurs
2646 (and set *TARGET_ERRNO). */
2648 target_fileio_close (int fd, int *target_errno)
2650 struct target_ops *t;
2652 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2654 if (t->to_fileio_close != NULL)
2656 int ret = t->to_fileio_close (t, fd, target_errno);
2659 fprintf_unfiltered (gdb_stdlog,
2660 "target_fileio_close (%d) = %d (%d)\n",
2661 fd, ret, ret != -1 ? 0 : *target_errno);
2666 *target_errno = FILEIO_ENOSYS;
2670 /* Unlink FILENAME on the target. Return 0, or -1 if an error
2671 occurs (and set *TARGET_ERRNO). */
2673 target_fileio_unlink (const char *filename, int *target_errno)
2675 struct target_ops *t;
2677 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2679 if (t->to_fileio_unlink != NULL)
2681 int ret = t->to_fileio_unlink (t, filename, target_errno);
2684 fprintf_unfiltered (gdb_stdlog,
2685 "target_fileio_unlink (%s) = %d (%d)\n",
2686 filename, ret, ret != -1 ? 0 : *target_errno);
2691 *target_errno = FILEIO_ENOSYS;
2695 /* Read value of symbolic link FILENAME on the target. Return a
2696 null-terminated string allocated via xmalloc, or NULL if an error
2697 occurs (and set *TARGET_ERRNO). */
2699 target_fileio_readlink (const char *filename, int *target_errno)
2701 struct target_ops *t;
2703 for (t = default_fileio_target (); t != NULL; t = t->beneath)
2705 if (t->to_fileio_readlink != NULL)
2707 char *ret = t->to_fileio_readlink (t, filename, target_errno);
2710 fprintf_unfiltered (gdb_stdlog,
2711 "target_fileio_readlink (%s) = %s (%d)\n",
2712 filename, ret? ret : "(nil)",
2713 ret? 0 : *target_errno);
2718 *target_errno = FILEIO_ENOSYS;
/* Cleanup callback: close the target file descriptor pointed to by
   OPAQUE, discarding any close error (best-effort on cleanup paths).  */

static void
target_fileio_close_cleanup (void *opaque)
{
  int fd = *(int *) opaque;
  int target_errno;

  target_fileio_close (fd, &target_errno);
}
2731 /* Read target file FILENAME. Store the result in *BUF_P and
2732 return the size of the transferred data. PADDING additional bytes are
2733 available in *BUF_P. This is a helper function for
2734 target_fileio_read_alloc; see the declaration of that function for more
2738 target_fileio_read_alloc_1 (const char *filename,
2739 gdb_byte **buf_p, int padding)
2741 struct cleanup *close_cleanup;
2742 size_t buf_alloc, buf_pos;
2748 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
2752 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
2754 /* Start by reading up to 4K at a time. The target will throttle
2755 this number down if necessary. */
2757 buf = xmalloc (buf_alloc);
2761 n = target_fileio_pread (fd, &buf[buf_pos],
2762 buf_alloc - buf_pos - padding, buf_pos,
2766 /* An error occurred. */
2767 do_cleanups (close_cleanup);
2773 /* Read all there was. */
2774 do_cleanups (close_cleanup);
2784 /* If the buffer is filling up, expand it. */
2785 if (buf_alloc < buf_pos * 2)
2788 buf = xrealloc (buf, buf_alloc);
2795 /* Read target file FILENAME. Store the result in *BUF_P and return
2796 the size of the transferred data. See the declaration in "target.h"
2797 function for more information about the return value. */
2800 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
2802 return target_fileio_read_alloc_1 (filename, buf_p, 0);
2805 /* Read target file FILENAME. The result is NUL-terminated and
2806 returned as a string, allocated using xmalloc. If an error occurs
2807 or the transfer is unsupported, NULL is returned. Empty objects
2808 are returned as allocated but empty strings. A warning is issued
2809 if the result contains any embedded NUL bytes. */
2812 target_fileio_read_stralloc (const char *filename)
2816 LONGEST i, transferred;
2818 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
2819 bufstr = (char *) buffer;
2821 if (transferred < 0)
2824 if (transferred == 0)
2825 return xstrdup ("");
2827 bufstr[transferred] = 0;
2829 /* Check for embedded NUL bytes; but allow trailing NULs. */
2830 for (i = strlen (bufstr); i < transferred; i++)
2833 warning (_("target file %s "
2834 "contained unexpected null characters"),
2844 default_region_ok_for_hw_watchpoint (struct target_ops *self,
2845 CORE_ADDR addr, int len)
2847 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
2851 default_watchpoint_addr_within_range (struct target_ops *target,
2853 CORE_ADDR start, int length)
2855 return addr >= start && addr < start + length;
2858 static struct gdbarch *
2859 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
2861 return target_gdbarch ();
2865 return_zero (struct target_ops *ignore)
2871 return_zero_has_execution (struct target_ops *ignore, ptid_t ignore2)
2877 * Find the next target down the stack from the specified target.
2881 find_target_beneath (struct target_ops *t)
2889 find_target_at (enum strata stratum)
2891 struct target_ops *t;
2893 for (t = current_target.beneath; t != NULL; t = t->beneath)
2894 if (t->to_stratum == stratum)
2901 /* The inferior process has died. Long live the inferior! */
2904 generic_mourn_inferior (void)
2908 ptid = inferior_ptid;
2909 inferior_ptid = null_ptid;
2911 /* Mark breakpoints uninserted in case something tries to delete a
2912 breakpoint while we delete the inferior's threads (which would
2913 fail, since the inferior is long gone). */
2914 mark_breakpoints_out ();
2916 if (!ptid_equal (ptid, null_ptid))
2918 int pid = ptid_get_pid (ptid);
2919 exit_inferior (pid);
2922 /* Note this wipes step-resume breakpoints, so needs to be done
2923 after exit_inferior, which ends up referencing the step-resume
2924 breakpoints through clear_thread_inferior_resources. */
2925 breakpoint_init_inferior (inf_exited);
2927 registers_changed ();
2929 reopen_exec_file ();
2930 reinit_frame_cache ();
2932 if (deprecated_detach_hook)
2933 deprecated_detach_hook ();
2936 /* Convert a normal process ID to a string. Returns the string in a
2940 normal_pid_to_str (ptid_t ptid)
2942 static char buf[32];
2944 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
2949 default_pid_to_str (struct target_ops *ops, ptid_t ptid)
2951 return normal_pid_to_str (ptid);
2954 /* Error-catcher for target_find_memory_regions. */
2956 dummy_find_memory_regions (struct target_ops *self,
2957 find_memory_region_ftype ignore1, void *ignore2)
2959 error (_("Command not implemented for this target."));
2963 /* Error-catcher for target_make_corefile_notes. */
2965 dummy_make_corefile_notes (struct target_ops *self,
2966 bfd *ignore1, int *ignore2)
2968 error (_("Command not implemented for this target."));
2972 /* Set up the handful of non-empty slots needed by the dummy target
2976 init_dummy_target (void)
2978 dummy_target.to_shortname = "None";
2979 dummy_target.to_longname = "None";
2980 dummy_target.to_doc = "";
2981 dummy_target.to_supports_disable_randomization
2982 = find_default_supports_disable_randomization;
2983 dummy_target.to_stratum = dummy_stratum;
2984 dummy_target.to_has_all_memory = return_zero;
2985 dummy_target.to_has_memory = return_zero;
2986 dummy_target.to_has_stack = return_zero;
2987 dummy_target.to_has_registers = return_zero;
2988 dummy_target.to_has_execution = return_zero_has_execution;
2989 dummy_target.to_magic = OPS_MAGIC;
2991 install_dummy_methods (&dummy_target);
2996 target_close (struct target_ops *targ)
2998 gdb_assert (!target_is_pushed (targ));
3000 if (targ->to_xclose != NULL)
3001 targ->to_xclose (targ);
3002 else if (targ->to_close != NULL)
3003 targ->to_close (targ);
3006 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3010 target_thread_alive (ptid_t ptid)
3012 return current_target.to_thread_alive (¤t_target, ptid);
3016 target_find_new_threads (void)
3018 current_target.to_find_new_threads (¤t_target);
3022 target_stop (ptid_t ptid)
3026 warning (_("May not interrupt or stop the target, ignoring attempt"));
3030 (*current_target.to_stop) (¤t_target, ptid);
3033 /* Concatenate ELEM to LIST, a comma separate list, and return the
3034 result. The LIST incoming argument is released. */
3037 str_comma_list_concat_elem (char *list, const char *elem)
3040 return xstrdup (elem);
3042 return reconcat (list, list, ", ", elem, (char *) NULL);
3045 /* Helper for target_options_to_string. If OPT is present in
3046 TARGET_OPTIONS, append the OPT_STR (string version of OPT) in RET.
3047 Returns the new resulting string. OPT is removed from
3051 do_option (int *target_options, char *ret,
3052 int opt, char *opt_str)
3054 if ((*target_options & opt) != 0)
3056 ret = str_comma_list_concat_elem (ret, opt_str);
3057 *target_options &= ~opt;
3064 target_options_to_string (int target_options)
3068 #define DO_TARG_OPTION(OPT) \
3069 ret = do_option (&target_options, ret, OPT, #OPT)
3071 DO_TARG_OPTION (TARGET_WNOHANG);
3073 if (target_options != 0)
3074 ret = str_comma_list_concat_elem (ret, "unknown???");
3082 debug_print_register (const char * func,
3083 struct regcache *regcache, int regno)
3085 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3087 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3088 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3089 && gdbarch_register_name (gdbarch, regno) != NULL
3090 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3091 fprintf_unfiltered (gdb_stdlog, "(%s)",
3092 gdbarch_register_name (gdbarch, regno));
3094 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3095 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3097 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3098 int i, size = register_size (gdbarch, regno);
3099 gdb_byte buf[MAX_REGISTER_SIZE];
3101 regcache_raw_collect (regcache, regno, buf);
3102 fprintf_unfiltered (gdb_stdlog, " = ");
3103 for (i = 0; i < size; i++)
3105 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3107 if (size <= sizeof (LONGEST))
3109 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3111 fprintf_unfiltered (gdb_stdlog, " %s %s",
3112 core_addr_to_string_nz (val), plongest (val));
3115 fprintf_unfiltered (gdb_stdlog, "\n");
3119 target_fetch_registers (struct regcache *regcache, int regno)
3121 current_target.to_fetch_registers (¤t_target, regcache, regno);
3123 debug_print_register ("target_fetch_registers", regcache, regno);
3127 target_store_registers (struct regcache *regcache, int regno)
3129 struct target_ops *t;
3131 if (!may_write_registers)
3132 error (_("Writing to registers is not allowed (regno %d)"), regno);
3134 current_target.to_store_registers (¤t_target, regcache, regno);
3137 debug_print_register ("target_store_registers", regcache, regno);
3142 target_core_of_thread (ptid_t ptid)
3144 return current_target.to_core_of_thread (¤t_target, ptid);
3148 simple_verify_memory (struct target_ops *ops,
3149 const gdb_byte *data, CORE_ADDR lma, ULONGEST size)
3151 LONGEST total_xfered = 0;
3153 while (total_xfered < size)
3155 ULONGEST xfered_len;
3156 enum target_xfer_status status;
3158 ULONGEST howmuch = min (sizeof (buf), size - total_xfered);
3160 status = target_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
3161 buf, NULL, lma + total_xfered, howmuch,
3163 if (status == TARGET_XFER_OK
3164 && memcmp (data + total_xfered, buf, xfered_len) == 0)
3166 total_xfered += xfered_len;
3175 /* Default implementation of memory verification. */
3178 default_verify_memory (struct target_ops *self,
3179 const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3181 /* Start over from the top of the target stack. */
3182 return simple_verify_memory (current_target.beneath,
3183 data, memaddr, size);
3187 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3189 return current_target.to_verify_memory (¤t_target,
3190 data, memaddr, size);
3193 /* The documentation for this function is in its prototype declaration in
3197 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3199 return current_target.to_insert_mask_watchpoint (¤t_target,
3203 /* The documentation for this function is in its prototype declaration in
3207 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
3209 return current_target.to_remove_mask_watchpoint (¤t_target,
3213 /* The documentation for this function is in its prototype declaration
3217 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
3219 return current_target.to_masked_watch_num_registers (¤t_target,
3223 /* The documentation for this function is in its prototype declaration
3227 target_ranged_break_num_registers (void)
3229 return current_target.to_ranged_break_num_registers (¤t_target);
3234 struct btrace_target_info *
3235 target_enable_btrace (ptid_t ptid)
3237 return current_target.to_enable_btrace (¤t_target, ptid);
3243 target_disable_btrace (struct btrace_target_info *btinfo)
3245 current_target.to_disable_btrace (¤t_target, btinfo);
3251 target_teardown_btrace (struct btrace_target_info *btinfo)
3253 current_target.to_teardown_btrace (¤t_target, btinfo);
3259 target_read_btrace (VEC (btrace_block_s) **btrace,
3260 struct btrace_target_info *btinfo,
3261 enum btrace_read_type type)
3263 return current_target.to_read_btrace (¤t_target, btrace, btinfo, type);
3269 target_stop_recording (void)
3271 current_target.to_stop_recording (¤t_target);
3277 target_save_record (const char *filename)
3279 current_target.to_save_record (¤t_target, filename);
3285 target_supports_delete_record (void)
3287 struct target_ops *t;
3289 for (t = current_target.beneath; t != NULL; t = t->beneath)
3290 if (t->to_delete_record != delegate_delete_record
3291 && t->to_delete_record != tdefault_delete_record)
3300 target_delete_record (void)
3302 current_target.to_delete_record (¤t_target);
3308 target_record_is_replaying (void)
3310 return current_target.to_record_is_replaying (¤t_target);
3316 target_goto_record_begin (void)
3318 current_target.to_goto_record_begin (¤t_target);
3324 target_goto_record_end (void)
3326 current_target.to_goto_record_end (¤t_target);
3332 target_goto_record (ULONGEST insn)
3334 current_target.to_goto_record (¤t_target, insn);
3340 target_insn_history (int size, int flags)
3342 current_target.to_insn_history (¤t_target, size, flags);
3348 target_insn_history_from (ULONGEST from, int size, int flags)
3350 current_target.to_insn_history_from (¤t_target, from, size, flags);
3356 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
3358 current_target.to_insn_history_range (¤t_target, begin, end, flags);
3364 target_call_history (int size, int flags)
3366 current_target.to_call_history (¤t_target, size, flags);
3372 target_call_history_from (ULONGEST begin, int size, int flags)
3374 current_target.to_call_history_from (¤t_target, begin, size, flags);
3380 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
3382 current_target.to_call_history_range (¤t_target, begin, end, flags);
3387 const struct frame_unwind *
3388 target_get_unwinder (void)
3390 return current_target.to_get_unwinder (¤t_target);
3395 const struct frame_unwind *
3396 target_get_tailcall_unwinder (void)
3398 return current_target.to_get_tailcall_unwinder (¤t_target);
3401 /* Default implementation of to_decr_pc_after_break. */
3404 default_target_decr_pc_after_break (struct target_ops *ops,
3405 struct gdbarch *gdbarch)
3407 return gdbarch_decr_pc_after_break (gdbarch);
3413 target_decr_pc_after_break (struct gdbarch *gdbarch)
3415 return current_target.to_decr_pc_after_break (¤t_target, gdbarch);
3421 target_prepare_to_generate_core (void)
3423 current_target.to_prepare_to_generate_core (¤t_target);
3429 target_done_generating_core (void)
3431 current_target.to_done_generating_core (¤t_target);
3435 setup_target_debug (void)
3437 memcpy (&debug_target, ¤t_target, sizeof debug_target);
3439 init_debug_target (¤t_target);
/* Help text shared by the "info target" and "info files" commands.  */

static char targ_desc[] =
  "Names of targets and files being debugged.\nShows the entire \
stack of targets currently in use (including the exec-file,\n\
core-file, and process, if any), as well as the symbol file name.";
3449 default_rcmd (struct target_ops *self, const char *command,
3450 struct ui_file *output)
3452 error (_("\"monitor\" command not supported by this target."));
3456 do_monitor_command (char *cmd,
3459 target_rcmd (cmd, gdb_stdtarg);
3462 /* Print the name of each layers of our target stack. */
3465 maintenance_print_target_stack (char *cmd, int from_tty)
3467 struct target_ops *t;
3469 printf_filtered (_("The current target stack is:\n"));
3471 for (t = target_stack; t != NULL; t = t->beneath)
3473 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
/* Controls if targets can report that they can/are async.  This is
   just for maintainers to use when debugging gdb.  */
int target_async_permitted = 1;

/* The set command writes to this variable.  If the inferior is
   executing, target_async_permitted is *not* updated.  */
static int target_async_permitted_1 = 1;
3486 maint_set_target_async_command (char *args, int from_tty,
3487 struct cmd_list_element *c)
3489 if (have_live_inferiors ())
3491 target_async_permitted_1 = target_async_permitted;
3492 error (_("Cannot change this setting while the inferior is running."));
3495 target_async_permitted = target_async_permitted_1;
3499 maint_show_target_async_command (struct ui_file *file, int from_tty,
3500 struct cmd_list_element *c,
3503 fprintf_filtered (file,
3504 _("Controlling the inferior in "
3505 "asynchronous mode is %s.\n"), value);
3508 /* Temporary copies of permission settings. */
3510 static int may_write_registers_1 = 1;
3511 static int may_write_memory_1 = 1;
3512 static int may_insert_breakpoints_1 = 1;
3513 static int may_insert_tracepoints_1 = 1;
3514 static int may_insert_fast_tracepoints_1 = 1;
3515 static int may_stop_1 = 1;
3517 /* Make the user-set values match the real values again. */
3520 update_target_permissions (void)
3522 may_write_registers_1 = may_write_registers;
3523 may_write_memory_1 = may_write_memory;
3524 may_insert_breakpoints_1 = may_insert_breakpoints;
3525 may_insert_tracepoints_1 = may_insert_tracepoints;
3526 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
3527 may_stop_1 = may_stop;
3530 /* The one function handles (most of) the permission flags in the same
3534 set_target_permissions (char *args, int from_tty,
3535 struct cmd_list_element *c)
3537 if (target_has_execution)
3539 update_target_permissions ();
3540 error (_("Cannot change this setting while the inferior is running."));
3543 /* Make the real values match the user-changed values. */
3544 may_write_registers = may_write_registers_1;
3545 may_insert_breakpoints = may_insert_breakpoints_1;
3546 may_insert_tracepoints = may_insert_tracepoints_1;
3547 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
3548 may_stop = may_stop_1;
3549 update_observer_mode ();
3552 /* Set memory write permission independently of observer mode. */
3555 set_write_memory_permission (char *args, int from_tty,
3556 struct cmd_list_element *c)
3558 /* Make the real values match the user-changed values. */
3559 may_write_memory = may_write_memory_1;
3560 update_observer_mode ();
3565 initialize_targets (void)
3567 init_dummy_target ();
3568 push_target (&dummy_target);
3570 add_info ("target", target_info, targ_desc);
3571 add_info ("files", target_info, targ_desc);
3573 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
3574 Set target debugging."), _("\
3575 Show target debugging."), _("\
3576 When non-zero, target debugging is enabled. Higher numbers are more\n\
3580 &setdebuglist, &showdebuglist);
3582 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
3583 &trust_readonly, _("\
3584 Set mode for reading from readonly sections."), _("\
3585 Show mode for reading from readonly sections."), _("\
3586 When this mode is on, memory reads from readonly sections (such as .text)\n\
3587 will be read from the object file instead of from the target. This will\n\
3588 result in significant performance improvement for remote targets."),
3590 show_trust_readonly,
3591 &setlist, &showlist);
3593 add_com ("monitor", class_obscure, do_monitor_command,
3594 _("Send a command to the remote monitor (remote targets only)."));
3596 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
3597 _("Print the name of each layer of the internal target stack."),
3598 &maintenanceprintlist);
3600 add_setshow_boolean_cmd ("target-async", no_class,
3601 &target_async_permitted_1, _("\
3602 Set whether gdb controls the inferior in asynchronous mode."), _("\
3603 Show whether gdb controls the inferior in asynchronous mode."), _("\
3604 Tells gdb whether to control the inferior in asynchronous mode."),
3605 maint_set_target_async_command,
3606 maint_show_target_async_command,
3607 &maintenance_set_cmdlist,
3608 &maintenance_show_cmdlist);
3610 add_setshow_boolean_cmd ("may-write-registers", class_support,
3611 &may_write_registers_1, _("\
3612 Set permission to write into registers."), _("\
3613 Show permission to write into registers."), _("\
3614 When this permission is on, GDB may write into the target's registers.\n\
3615 Otherwise, any sort of write attempt will result in an error."),
3616 set_target_permissions, NULL,
3617 &setlist, &showlist);
3619 add_setshow_boolean_cmd ("may-write-memory", class_support,
3620 &may_write_memory_1, _("\
3621 Set permission to write into target memory."), _("\
3622 Show permission to write into target memory."), _("\
3623 When this permission is on, GDB may write into the target's memory.\n\
3624 Otherwise, any sort of write attempt will result in an error."),
3625 set_write_memory_permission, NULL,
3626 &setlist, &showlist);
3628 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
3629 &may_insert_breakpoints_1, _("\
3630 Set permission to insert breakpoints in the target."), _("\
3631 Show permission to insert breakpoints in the target."), _("\
3632 When this permission is on, GDB may insert breakpoints in the program.\n\
3633 Otherwise, any sort of insertion attempt will result in an error."),
3634 set_target_permissions, NULL,
3635 &setlist, &showlist);
3637 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
3638 &may_insert_tracepoints_1, _("\
3639 Set permission to insert tracepoints in the target."), _("\
3640 Show permission to insert tracepoints in the target."), _("\
3641 When this permission is on, GDB may insert tracepoints in the program.\n\
3642 Otherwise, any sort of insertion attempt will result in an error."),
3643 set_target_permissions, NULL,
3644 &setlist, &showlist);
3646 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
3647 &may_insert_fast_tracepoints_1, _("\
3648 Set permission to insert fast tracepoints in the target."), _("\
3649 Show permission to insert fast tracepoints in the target."), _("\
3650 When this permission is on, GDB may insert fast tracepoints.\n\
3651 Otherwise, any sort of insertion attempt will result in an error."),
3652 set_target_permissions, NULL,
3653 &setlist, &showlist);
3655 add_setshow_boolean_cmd ("may-interrupt", class_support,
3657 Set permission to interrupt or signal the target."), _("\
3658 Show permission to interrupt or signal the target."), _("\
3659 When this permission is on, GDB may interrupt/stop the target's execution.\n\
3660 Otherwise, any attempt to interrupt or stop will be ignored."),
3661 set_target_permissions, NULL,
3662 &setlist, &showlist);
3664 add_setshow_boolean_cmd ("auto-connect-native-target", class_support,
3665 &auto_connect_native_target, _("\
3666 Set whether GDB may automatically connect to the native target."), _("\
3667 Show whether GDB may automatically connect to the native target."), _("\
3668 When on, and GDB is not connected to a target yet, GDB\n\
3669 attempts \"run\" and other commands with the native target."),
3670 NULL, show_auto_connect_native_target,
3671 &setlist, &showlist);